/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 113793 2003-04-21 07:27:59Z davidxu $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
    &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
    &max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

static int virtual_cpu;
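/*
 * RANGEOF() below gives the byte span between two members of a structure,
 * so that a slice of a struct can be cleared or copied in a single call;
 * for example, kse_create() zeroes the scratch section of a new ksegrp
 * with:
 *
 *      bzero(&newkg->kg_startzero,
 *          RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
 *
 * This assumes the 'end' member is laid out after the 'start' member.
 */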
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
    TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;
        int def_val;

#ifdef SMP
        def_val = mp_ncpus;
#else
        def_val = 1;
#endif
        if (virtual_cpu == 0)
                new_val = def_val;
        else
                new_val = virtual_cpu;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < 0)
                return (EINVAL);
        virtual_cpu = new_val;
        return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
}
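/*
 * A note on the UMA callbacks used by these zones: the ctor/dtor pair
 * above runs on every allocation and free from a zone, while the
 * init/fini pair below runs only when items enter or leave the zone's
 * type-stable cache.  Expensive setup, such as the kernel stack
 * allocation done by pmap_new_thread() in thread_init(), is therefore
 * amortized across many reuses of a struct thread.
 */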
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        mtx_lock(&Giant);
        pmap_new_thread(td, 0);
        mtx_unlock(&Giant);
        cpu_thread_setup(td);
        td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
        struct kse *ke;

        ke = (struct kse *)mem;
        ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
        struct ksegrp *kg;

        kg = (struct ksegrp *)mem;
        kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its KSE group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
        struct proc *p = kg->kg_proc;

        TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
        kg->kg_kses++;
        ke->ke_state = KES_UNQUEUED;
        ke->ke_proc = p;
        ke->ke_ksegrp = kg;
        ke->ke_thread = NULL;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = ke->ke_ksegrp;
        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
        if (ke->ke_state == KES_IDLE) {
                TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                kg->kg_idle_kses--;
        }
        if (--kg->kg_kses == 0)
                ksegrp_unlink(kg);
        /*
         * Aggregate stats from the KSE
         */
        kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

        TAILQ_INIT(&kg->kg_threads);
        TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_slpq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_kseq);       /* all kses in ksegrp */
        TAILQ_INIT(&kg->kg_iq);         /* all idle kses in ksegrp */
        TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structures in ksegrp */
        kg->kg_proc = p;
        /*
         * The following counters are in the -zero- section
         * and may not need clearing.
         */
        kg->kg_numthreads = 0;
        kg->kg_runnable = 0;
        kg->kg_kses = 0;
        kg->kg_runq_kses = 0;   /* XXXKSE change name */
        kg->kg_idle_kses = 0;
        kg->kg_numupcalls = 0;
        /* Link it in now that it's consistent. */
        p->p_numksegrps++;
        TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
        struct proc *p;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
        KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
        KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

        p = kg->kg_proc;
        TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
        p->p_numksegrps--;
        /*
         * Aggregate stats from the KSE
         */
        ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
        struct kse_upcall *ku;

        ku = uma_zalloc(upcall_zone, M_WAITOK);
        bzero(ku, sizeof(*ku));
        return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

        uma_zfree(upcall_zone, ku);
}
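/*
 * Upcall structure life cycle: each struct kse_upcall is linked into its
 * ksegrp's kg_upcalls list and, except transiently, has exactly one
 * owner thread (ku_owner).  Ownership moves between threads via
 * thread_schedule_upcall() later in this file and is dropped by
 * upcall_remove() below; an upcall may only be unlinked once it has no
 * owner.  All of these transitions happen under sched_lock.
 */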
void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
        ku->ku_ksegrp = kg;
        kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
        struct ksegrp *kg = ku->ku_ksegrp;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
        TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
        kg->kg_numupcalls--;
        upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

        if (td->td_upcall) {
                td->td_upcall->ku_owner = NULL;
                upcall_unlink(td->td_upcall);
                td->td_upcall = NULL;
        }
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
    struct kse *ke, struct thread *td)
{

        TAILQ_INIT(&p->p_ksegrps);      /* all ksegrps in proc */
        TAILQ_INIT(&p->p_threads);      /* all threads in proc */
        TAILQ_INIT(&p->p_suspended);    /* Threads suspended */
        p->p_numksegrps = 0;
        p->p_numthreads = 0;

        ksegrp_link(kg, p);
        kse_link(ke, kg);
        thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
        struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
        struct proc *p;
        struct thread *td2;

        p = td->td_proc;
        if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
                return (EINVAL);
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td2) {
                if (td2->td_mailbox == uap->tmbx) {
                        td2->td_flags |= TDF_INTERRUPT;
                        if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
                                if (td2->td_flags & TDF_CVWAITQ)
                                        cv_abort(td2);
                                else
                                        abortsleep(td2);
                        }
                        mtx_unlock_spin(&sched_lock);
                        return (0);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (ESRCH);
}

/*
struct kse_exit_args {
        register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse *ke;

        p = td->td_proc;
        if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
                return (EINVAL);
        kg = td->td_ksegrp;
        /* Serialize removing upcall */
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (EDEADLK);
        }
        ke = td->td_kse;
        upcall_remove(td);
        if (p->p_numthreads == 1) {
                kse_purge(p, td);
                p->p_flag &= ~P_THREADED;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
        } else {
                if (kg->kg_numthreads == 1) {   /* Shutdown a group */
                        kse_purge_group(td);
                        ke->ke_flags |= KEF_EXIT;
                }
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        return (0);
}
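/*
 * A rough sketch of how a userland threading library (UTS) is expected
 * to drive these syscalls.  This is illustrative only; the userland side
 * shown here is an assumption about the caller, not something this file
 * defines:
 *
 *      struct kse_mailbox km;  (with km_func set to the upcall entry)
 *      kse_create(&km, 0);     register an upcall/KSE with the kernel
 *      ...
 *      kse_release(&ts);       idle UTS: sleep until there is work
 *      kse_wakeup(&km);        another thread: kick that sleeping UTS
 *      kse_exit();             last thread: leave KSE mode
 */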
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
        struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct timespec ts, ts2, ts3, timeout;
        struct timeval tv;
        int error;

        p = td->td_proc;
        kg = td->td_ksegrp;
        if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
                return (EINVAL);
        if (uap->timeout != NULL) {
                if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
                        return (error);
                getnanouptime(&ts);
                timespecadd(&ts, &timeout);
                TIMESPEC_TO_TIMEVAL(&tv, &timeout);
        }
        mtx_lock_spin(&sched_lock);
        /* Change OURSELF to become an upcall. */
        td->td_flags = TDF_UPCALLING;
#if 0 /* XXX This shouldn't be necessary */
        if (p->p_sflag & PS_NEEDSIGCHK)
                td->td_flags |= TDF_ASTPENDING;
#endif
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
        while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
            (kg->kg_completed == NULL)) {
                kg->kg_upsleeps++;
                error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
                    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
                kg->kg_upsleeps--;
                PROC_UNLOCK(p);
                if (uap->timeout == NULL || error != EWOULDBLOCK)
                        return (0);
                getnanouptime(&ts2);
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
                PROC_LOCK(p);
        }
        PROC_UNLOCK(p);
        return (0);
}

/* struct kse_wakeup_args {
        struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse_upcall *ku;
        struct thread *td2;

        p = td->td_proc;
        td2 = NULL;
        ku = NULL;
        /* KSE-enabled processes only, please. */
        if (!(p->p_flag & P_THREADED))
                return (EINVAL);
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if (uap->mbx) {
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        FOREACH_UPCALL_IN_GROUP(kg, ku) {
                                if (ku->ku_mailbox == uap->mbx)
                                        break;
                        }
                        if (ku)
                                break;
                }
        } else {
                kg = td->td_ksegrp;
                if (kg->kg_upsleeps) {
                        wakeup_one(&kg->kg_completed);
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        return (0);
                }
                ku = TAILQ_FIRST(&kg->kg_upcalls);
        }
        if (ku) {
                if ((td2 = ku->ku_owner) == NULL) {
                        panic("%s: no owner", __func__);
                } else if (TD_ON_SLEEPQ(td2) &&
                    (td2->td_wchan == &kg->kg_completed)) {
                        abortsleep(td2);
                } else {
                        ku->ku_flags |= KUF_DOUPCALL;
                }
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (0);
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (ESRCH);
}

/*
 * No new KSEG: first call: use the current KSE, don't schedule an upcall.
 * In all other situations, allocate new KSEs (up to the maximum) and
 * schedule an upcall.
 */
/* struct kse_create_args {
        struct kse_mailbox *mbx;
        int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
        struct kse *newke;
        struct ksegrp *newkg;
        struct ksegrp *kg;
        struct proc *p;
        struct kse_mailbox mbx;
        struct kse_upcall *newku;
        int err, ncpus;

        p = td->td_proc;
        if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
                return (err);

        /* Too bad, why doesn't the kernel always have a cpu counter!? */
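        /*
         * The ncpus value computed below bounds both the number of KSEs
         * created for a group and the number of upcalls allowed, on the
         * theory that more concurrency than physical processors cannot
         * help; setting thread_debug plus the kern.threads.virtual_cpu
         * sysctl lets this be overridden for testing.
         */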
#ifdef SMP
        ncpus = mp_ncpus;
#else
        ncpus = 1;
#endif
        if (thread_debug && virtual_cpu != 0)
                ncpus = virtual_cpu;

        /* Easier to just set it than to test and set */
        PROC_LOCK(p);
        p->p_flag |= P_THREADED;
        PROC_UNLOCK(p);
        kg = td->td_ksegrp;
        if (uap->newgroup) {
                /* Have race condition but it is cheap */
                if (p->p_numksegrps >= max_groups_per_proc)
                        return (EPROCLIM);
                /*
                 * If we want a new KSEGRP it doesn't matter whether
                 * we have already fired up KSE mode before or not.
                 * We put the process in KSE mode and create a new KSEGRP.
                 */
                newkg = ksegrp_alloc();
                bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
                    kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
                mtx_lock_spin(&sched_lock);
                if (p->p_numksegrps >= max_groups_per_proc) {
                        mtx_unlock_spin(&sched_lock);
                        ksegrp_free(newkg);
                        return (EPROCLIM);
                }
                ksegrp_link(newkg, p);
                mtx_unlock_spin(&sched_lock);
        } else {
                newkg = kg;
        }

        /*
         * Creating more upcalls than there are physical cpus does
         * not help performance.
         */
        if (newkg->kg_numupcalls >= ncpus)
                return (EPROCLIM);

        if (newkg->kg_numupcalls == 0) {
                /*
                 * Initialize the KSE group, optimized for MP.
                 * Create as many KSEs as there are physical cpus.
                 * This increases concurrency even if userland is not
                 * MP safe and can only run on a single CPU (which is
                 * true for early versions of libpthread).
                 * In an ideal world, every physical cpu should execute
                 * a thread.  If there are enough KSEs, threads in the
                 * kernel can be executed in parallel on different cpus
                 * at full speed; concurrency in the kernel shouldn't be
                 * restricted by the number of upcalls userland provides.
                 * Adding more upcall structures only increases
                 * concurrency in userland.
                 * The highest performance configuration is:
                 * N kses = N upcalls = N physical cpus
                 */
                while (newkg->kg_kses < ncpus) {
                        newke = kse_alloc();
                        bzero(&newke->ke_startzero, RANGEOF(struct kse,
                            ke_startzero, ke_endzero));
#if 0
                        mtx_lock_spin(&sched_lock);
                        bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
                            RANGEOF(struct kse, ke_startcopy, ke_endcopy));
                        mtx_unlock_spin(&sched_lock);
#endif
                        mtx_lock_spin(&sched_lock);
                        kse_link(newke, newkg);
                        /* Add engine */
                        kse_reassign(newke);
                        mtx_unlock_spin(&sched_lock);
                }
        }
        newku = upcall_alloc();
        newku->ku_mailbox = uap->mbx;
        newku->ku_func = mbx.km_func;
        bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

        /* For the first call this may not have been set */
        if (td->td_standin == NULL)
                thread_alloc_spare(td, NULL);

        mtx_lock_spin(&sched_lock);
        if (newkg->kg_numupcalls >= ncpus) {
                mtx_unlock_spin(&sched_lock);
                upcall_free(newku);
                return (EPROCLIM);
        }
        upcall_link(newku, newkg);
        if (mbx.km_quantum)
                newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

        /*
         * Each upcall structure has an owner thread, find which
         * one owns it.
         */
        if (uap->newgroup) {
                /*
                 * Because the new ksegrp has no thread yet,
                 * create an initial upcall thread to own it.
                 */
                thread_schedule_upcall(td, newku);
        } else {
                /*
                 * If the current thread doesn't have an upcall structure,
                 * just assign the upcall to it.
                 */
                if (td->td_upcall == NULL) {
                        newku->ku_owner = td;
                        td->td_upcall = newku;
                } else {
                        /*
                         * Create a new upcall thread to own it.
                         */
                        thread_schedule_upcall(td, newku);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        get_mcontext(td, &uc->uc_mcontext);
#endif
        PROC_LOCK(td->td_proc);
        uc->uc_sigmask = td->td_sigmask;
        PROC_UNLOCK(td->td_proc);
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
        int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        ret = set_mcontext(td, &uc->uc_mcontext);
#else
        ret = ENOSYS;
#endif
        if (ret == 0) {
                SIG_CANTMASK(uc->uc_sigmask);
                PROC_LOCK(td->td_proc);
                td->td_sigmask = uc->uc_sigmask;
                PROC_UNLOCK(td->td_proc);
        }
        return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, 0);
#else
        /*
         * XXX the ia64 kstack allocator is really lame and is at the mercy
         * of contigmalloc().  This hackery is to pre-construct a whole
         * pile of thread structures with associated kernel stacks early
         * in the system startup while contigmalloc() still works.  Once we
         * have them, keep them.  Sigh.
         */
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        uma_prealloc(thread_zone, 512);         /* XXX arbitrary */
#endif
        ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
            NULL, NULL, ksegrp_init, NULL,
            UMA_ALIGN_CACHE, 0);
        kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
            NULL, NULL, kse_init, NULL,
            UMA_ALIGN_CACHE, 0);
        upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
            NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
        mtx_unlock_spin(&kse_zombie_lock);
}
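/*
 * Design note: the *_stash() functions defer freeing because the caller
 * is frequently still executing on the resource being released (a dying
 * thread cannot free its own stack, for instance).  Items are parked on
 * the zombie queues under the kse_zombie_lock spin mutex and reclaimed
 * later by thread_reap(), which runs from a safe context such as
 * thread_alloc() or thread_wait().
 */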
/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;
        struct kse *ke_first, *ke_next;
        struct ksegrp *kg_first, *kg_next;
        struct kse_upcall *ku_first, *ku_next;

        /*
         * Don't even bother to lock if none at this instant,
         * we really don't care about the next instant..
         */
        if ((!TAILQ_EMPTY(&zombie_threads))
            || (!TAILQ_EMPTY(&zombie_kses))
            || (!TAILQ_EMPTY(&zombie_ksegrps))
            || (!TAILQ_EMPTY(&zombie_upcalls))) {
                mtx_lock_spin(&kse_zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                ke_first = TAILQ_FIRST(&zombie_kses);
                kg_first = TAILQ_FIRST(&zombie_ksegrps);
                ku_first = TAILQ_FIRST(&zombie_upcalls);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                if (ke_first)
                        TAILQ_INIT(&zombie_kses);
                if (kg_first)
                        TAILQ_INIT(&zombie_ksegrps);
                if (ku_first)
                        TAILQ_INIT(&zombie_upcalls);
                mtx_unlock_spin(&kse_zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_runq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
                while (ke_first) {
                        ke_next = TAILQ_NEXT(ke_first, ke_procq);
                        kse_free(ke_first);
                        ke_first = ke_next;
                }
                while (kg_first) {
                        kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
                        ksegrp_free(kg_first);
                        kg_first = kg_next;
                }
                while (ku_first) {
                        ku_next = TAILQ_NEXT(ku_first, ku_link);
                        upcall_free(ku_first);
                        ku_first = ku_next;
                }
        }
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
        return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
        return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        thread_reap();  /* check if any zombies to get */
        return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
        uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
        uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        cpu_thread_clean(td);
        uma_zfree(thread_zone, td);
}
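/*
 * Two conventions used by the context-export code below:
 *
 * 1. Completed-mailbox handoff: kg_completed is published to userland by
 *    writing the list head into user memory with suword() and then
 *    re-checking, under the proc lock, that kg_completed did not change
 *    in the meantime; if it did, the store is retried with the new head.
 *    This gives compare-and-swap-like semantics without holding a lock
 *    across the user-space access.
 *
 * 2. Tick accounting: user-mode statclock ticks accumulate in td_uuticks
 *    and kernel-mode ticks in td_usticks (see thread_statclock() below);
 *    they are flushed to the thread's mailbox here and in
 *    thread_update_usr_ticks() on the way back to userland, where the
 *    UTS can see them.
 */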
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        uintptr_t mbx;
        void *addr;
        int error, temp;
        ucontext_t uc;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Export the user/machine context. */
        addr = (void *)(&td->td_mailbox->tm_context);
        error = copyin(addr, &uc, sizeof(ucontext_t));
        if (error)
                goto bad;

        thread_getcontext(td, &uc);
        error = copyout(&uc, addr, sizeof(ucontext_t));
        if (error)
                goto bad;

        /* Export clock ticks spent in kernel mode. */
        addr = (caddr_t)(&td->td_mailbox->tm_sticks);
        temp = fuword(addr) + td->td_usticks;
        if (suword(addr, temp)) {
                error = EFAULT;
                goto bad;
        }

        /* Get address in latest mbox of list pointer */
        addr = (void *)(&td->td_mailbox->tm_next);
        /*
         * Put the saved address of the previous first
         * entry into this one
         */
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        error = EFAULT;
                        goto bad;
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = td->td_mailbox;
                        /*
                         * The thread context may be taken away by
                         * other upcall threads when we unlock the
                         * process lock.  It's no longer valid to
                         * use it again in any other places.
                         */
                        td->td_mailbox = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        td->td_usticks = 0;
        return (0);

bad:
        PROC_LOCK(p);
        psignal(p, SIGSEGV);
        PROC_UNLOCK(p);
        /* The mailbox is bad, don't use it */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
        struct proc *p = kg->kg_proc;
        void *addr;
        uintptr_t mbx;

        addr = (void *)(&ku->ku_mailbox->km_completed);
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (EFAULT);
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
        struct thread *td = curthread;

        if (td->td_ksegrp->kg_numupcalls == 0)
                return (-1);
        if (user) {
                /* Currently always done via ast() */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
                mtx_unlock_spin(&sched_lock);
                td->td_uuticks++;
        } else {
                if (td->td_mailbox != NULL)
                        td->td_usticks++;
                else {
                        /* XXXKSE
                         * We will call thread_user_enter() for every
                         * kernel entry in future, so if the thread mailbox
                         * is NULL, it must be a UTS kernel thread; don't
                         * account clock ticks for it.
                         */
                }
        }
        return (0);
}

/*
 * Export statclock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
        struct proc *p = td->td_proc;
        struct kse_thr_mailbox *tmbx;
        struct kse_upcall *ku;
        struct ksegrp *kg;
        caddr_t addr;
        uint uticks;

        if ((ku = td->td_upcall) == NULL)
                return (-1);

        tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
        if ((tmbx == NULL) || (tmbx == (void *)-1))
                return (-1);
        if (user) {
                uticks = td->td_uuticks;
                td->td_uuticks = 0;
                addr = (caddr_t)&tmbx->tm_uticks;
        } else {
                uticks = td->td_usticks;
                td->td_usticks = 0;
                addr = (caddr_t)&tmbx->tm_sticks;
        }
        if (uticks) {
                if (suword(addr, uticks + fuword(addr))) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (-2);
                }
        }
        kg = td->td_ksegrp;
        if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
                mtx_lock_spin(&sched_lock);
                td->td_upcall->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
        }
        return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        struct thread *td;
        struct kse *ke;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        kg = td->td_ksegrp;
        p = td->td_proc;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        KASSERT(ke != NULL, ("thread exiting without a kse"));
        KASSERT(kg != NULL, ("thread exiting without a kse group"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR1(KTR_PROC, "thread_exit: thread %p", td);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        if (td->td_standin != NULL) {
                thread_stash(td->td_standin);
                td->td_standin = NULL;
        }

        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled. Skip
         * all this stuff.
         */
        if (p->p_numthreads > 1) {
                thread_unlink(td);
                if (p->p_maxthrwaits)
                        wakeup(&p->p_numthreads);
                /*
                 * The test below is NOT true if we are the
                 * sole exiting thread. P_STOPPED_SINGLE is unset
                 * in exit1() after it is the only survivor.
                 */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }

                /*
                 * Because each upcall structure has an owner thread,
                 * an owner thread exits only when the process is in the
                 * exiting state, so an upcall to userland is no longer
                 * needed and deleting the upcall structure is safe here.
                 * Thus when all threads in a group have exited, all
                 * upcalls in the group should be automatically freed.
                 */
                if (td->td_upcall)
                        upcall_remove(td);

                ke->ke_state = KES_UNQUEUED;
                ke->ke_thread = NULL;
                /*
                 * Decide what to do with the KSE attached to this thread.
                 */
                if (ke->ke_flags & KEF_EXIT)
                        kse_unlink(ke);
                else
                        kse_reassign(ke);
                PROC_UNLOCK(p);
                td->td_kse = NULL;
                td->td_state = TDS_INACTIVE;
#if 0
                td->td_proc = NULL;
#endif
                td->td_ksegrp = NULL;
                td->td_last_kse = NULL;
                PCPU_SET(deadthread, td);
        } else {
                PROC_UNLOCK(p);
        }
        /* XXX Shouldn't cpu_throw() here. */
        mtx_assert(&sched_lock, MA_OWNED);
#if defined(__i386__) || defined(__sparc64__)
        cpu_throw(td, choosethread());
#else
        cpu_throw();
#endif
        panic("I'm a teapot!");
        /* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait();
 * called with Giant held, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
        KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_standin != NULL) {
                        thread_free(td->td_standin);
                        td->td_standin = NULL;
                }
                cpu_thread_clean(td);
        }
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
        struct proc *p;

        p = kg->kg_proc;
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_ksegrp = kg;
        td->td_last_kse = NULL;
        td->td_flags = 0;
        td->td_kse = NULL;

        LIST_INIT(&td->td_contested);
        callout_init(&td->td_slpcallout, 1);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
        p->p_numthreads++;
        kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
        struct proc *p = td->td_proc;
        struct ksegrp *kg = td->td_ksegrp;

        TAILQ_REMOVE(&p->p_threads, td, td_plist);
        p->p_numthreads--;
        TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
        kg->kg_numthreads--;
        /* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources.  When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        kg = td->td_ksegrp;
        KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
        while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                KASSERT(ke->ke_state == KES_IDLE,
                    ("%s: wrong idle KSE state", __func__));
                kse_unlink(ke);
        }
        KASSERT((kg->kg_kses == 1),
            ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
        KASSERT((kg->kg_numupcalls == 0),
            ("%s: ksegrp still has %d upcall data",
            __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources.  When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        KASSERT(p->p_numthreads == 1, ("bad thread number"));
        mtx_lock_spin(&sched_lock);
        while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
                TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
                p->p_numksegrps--;
                /*
                 * There is no ownership for KSEs; after all the threads
                 * in the group have exited, it is possible that some
                 * KSEs were left on the idle queue.  GC them now.
                 */
                while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                        KASSERT(ke->ke_state == KES_IDLE,
                            ("%s: wrong idle KSE state", __func__));
                        TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                        kg->kg_idle_kses--;
                        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
                        kg->kg_kses--;
                        kse_stash(ke);
                }
                KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
                    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
                    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
                KASSERT((kg->kg_numupcalls == 0),
                    ("%s: ksegrp still has %d upcall data",
                    __func__, kg->kg_numupcalls));

                if (kg != td->td_ksegrp)
                        ksegrp_stash(kg);
        }
        TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
        p->p_numksegrps++;
        mtx_unlock_spin(&sched_lock);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall.  Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
        if (td->td_standin)
                return;
        if (spare == NULL)
                spare = thread_alloc();
        td->td_standin = spare;
        bzero(&spare->td_startzero,
            (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
        spare->td_proc = td->td_proc;
        spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
        struct thread *td2;

        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * Schedule an upcall thread on the specified kse_upcall;
         * the kse_upcall must be free.
         * td must have a spare thread.
         */
        KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
        if ((td2 = td->td_standin) != NULL) {
                td->td_standin = NULL;
        } else {
                panic("no reserve thread when scheduling an upcall");
                return (NULL);
        }
        CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
            td2, td->td_proc->p_pid, td->td_proc->p_comm);
        bcopy(&td->td_startcopy, &td2->td_startcopy,
            (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));
        thread_link(td2, ku->ku_ksegrp);
        /* inherit blocked thread's context */
        bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
        cpu_set_upcall(td2, td->td_pcb);
        /* Let the new thread become owner of the upcall */
        ku->ku_owner = td2;
        td2->td_upcall = ku;
        td2->td_flags = TDF_UPCALLING;
#if 0 /* XXX This shouldn't be necessary */
        if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
                td2->td_flags |= TDF_ASTPENDING;
#endif
        td2->td_kse = NULL;
        td2->td_state = TDS_CAN_RUN;
        td2->td_inhibitors = 0;
        setrunqueue(td2);
        return (td2);   /* bogus.. should be a void function */
}
void
thread_signal_add(struct thread *td, int sig)
{
        struct kse_upcall *ku;
        struct proc *p;
        sigset_t ss;
        int error;

        PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
        td = curthread;
        ku = td->td_upcall;
        p = td->td_proc;

        PROC_UNLOCK(p);
        error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
        if (error)
                goto error;

        SIGADDSET(ss, sig);

        error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
        if (error)
                goto error;

        PROC_LOCK(p);
        return;
error:
        PROC_LOCK(p);
        sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received a signal.
 */
void
thread_signal_upcall(struct thread *td)
{
        mtx_lock_spin(&sched_lock);
        td->td_flags |= TDF_UPCALLING;
        mtx_unlock_spin(&sched_lock);

        return;
}

void
thread_switchout(struct thread *td)
{
        struct kse_upcall *ku;

        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * If the outgoing thread is in a threaded group and has never
         * scheduled an upcall, decide whether this is a short
         * or long term event and thus whether or not to schedule
         * an upcall.
         * If it is a short term event, just suspend it in
         * a way that takes its KSE with it.
         * Select the events for which we want to schedule upcalls.
         * For now it's just sleep.
         * XXXKSE eventually almost any inhibition could do.
         */
        if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
                /*
                 * Release ownership of the upcall, and schedule an
                 * upcall thread; this new upcall thread becomes the
                 * owner of the upcall structure.
                 */
                ku = td->td_upcall;
                ku->ku_owner = NULL;
                td->td_upcall = NULL;
                td->td_flags &= ~TDF_CAN_UNBIND;
                thread_schedule_upcall(td, ku);
        }
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
        struct ksegrp *kg;
        struct kse_upcall *ku;
        struct kse_thr_mailbox *tmbx;

        kg = td->td_ksegrp;

        /*
         * First check that we shouldn't just abort.
         * But check if we are the single thread first!
         */
        PROC_LOCK(p);
        if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        PROC_UNLOCK(p);

        /*
         * If we are doing a syscall in a KSE environment,
         * note where our mailbox is. There is always the
         * possibility that we could do this lazily (in kse_reassign()),
         * but for now do it every time.
         */
        kg = td->td_ksegrp;
        if (kg->kg_numupcalls) {
                ku = td->td_upcall;
                KASSERT(ku, ("%s: no upcall owned", __func__));
                KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
                KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
                ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
                tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
                if ((tmbx == NULL) || (tmbx == (void *)-1)) {
                        td->td_mailbox = NULL;
                } else {
                        td->td_mailbox = tmbx;
                        if (td->td_standin == NULL)
                                thread_alloc_spare(td, NULL);
                        mtx_lock_spin(&sched_lock);
                        if (ku->ku_mflags & KMF_NOUPCALL)
                                td->td_flags &= ~TDF_CAN_UNBIND;
                        else
                                td->td_flags |= TDF_CAN_UNBIND;
                        mtx_unlock_spin(&sched_lock);
                }
        }
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
        int error = 0, upcalls, uts_crit;
        struct kse_upcall *ku;
        struct ksegrp *kg, *kg2;
        struct proc *p;
        struct timespec ts;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Nothing to do with non-threaded group/process */
        if (td->td_ksegrp->kg_numupcalls == 0)
                return (0);

        /*
         * A statclock interrupt hit in userland and we are
         * returning from the interrupt; charge the thread's
         * userland time for the UTS.
         */
        if (td->td_flags & TDF_USTATCLOCK) {
                thread_update_usr_ticks(td, 1);
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_USTATCLOCK;
                mtx_unlock_spin(&sched_lock);
                if (kg->kg_completed ||
                    (td->td_upcall->ku_flags & KUF_DOUPCALL))
                        thread_user_enter(p, td);
        }

        uts_crit = (td->td_mailbox == NULL);
        ku = td->td_upcall;
        /*
         * Optimisation:
         * This thread has not started any upcall.
         * If there is no work to report other than ourself,
         * then it can return direct to userland.
         */
        if (TD_CAN_UNBIND(td)) {
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_CAN_UNBIND;
                if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
                    (kg->kg_completed == NULL) &&
                    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
                    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
                        mtx_unlock_spin(&sched_lock);
                        thread_update_usr_ticks(td, 0);
                        nanotime(&ts);
                        error = copyout(&ts,
                            (caddr_t)&ku->ku_mailbox->km_timeofday,
                            sizeof(ts));
                        td->td_mailbox = NULL;
                        ku->ku_mflags = 0;
                        if (error)
                                goto out;
                        return (0);
                }
                mtx_unlock_spin(&sched_lock);
                error = thread_export_context(td);
                if (error) {
                        /*
                         * Failing to do the KSE operation just defaults
                         * back to synchronous operation, so just return from
                         * the syscall.
                         */
                        goto out;
                }
                /*
                 * There is something to report, and we own an upcall
                 * structure, so we can go to userland.
                 * Turn ourself into an upcall thread.
                 */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= TDF_UPCALLING;
                mtx_unlock_spin(&sched_lock);
        } else if (td->td_mailbox && (ku == NULL)) {
                error = thread_export_context(td);
                /* possibly upcall with error? */
                PROC_LOCK(p);
                /*
                 * There are upcall threads waiting for
                 * work to do, wake one of them up.
                 * XXXKSE Maybe wake all of them up.
                 */
                if (!error && kg->kg_upsleeps)
                        wakeup_one(&kg->kg_completed);
                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }

        KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

        if (p->p_numthreads > max_threads_per_proc) {
                max_threads_hits++;
                PROC_LOCK(p);
                while (p->p_numthreads > max_threads_per_proc) {
                        if (P_SHOULDSTOP(p))
                                break;
                        upcalls = 0;
                        mtx_lock_spin(&sched_lock);
                        FOREACH_KSEGRP_IN_PROC(p, kg2) {
                                if (kg2->kg_numupcalls == 0)
                                        upcalls++;
                                else
                                        upcalls += kg2->kg_numupcalls;
                        }
                        mtx_unlock_spin(&sched_lock);
                        if (upcalls >= max_threads_per_proc)
                                break;
                        p->p_maxthrwaits++;
                        msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
                            "maxthreads", NULL);
                        p->p_maxthrwaits--;
                }
                PROC_UNLOCK(p);
        }

        if (td->td_flags & TDF_UPCALLING) {
                uts_crit = 0;
                kg->kg_nextupcall = ticks + kg->kg_upquantum;
                /*
                 * There is no more work to do and we are going to ride
                 * this thread up to userland as an upcall.
                 * Do the last parts of the setup needed for the upcall.
                 */
                CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
                    td, td->td_proc->p_pid, td->td_proc->p_comm);

                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_UPCALLING;
                if (ku->ku_flags & KUF_DOUPCALL)
                        ku->ku_flags &= ~KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);

                /*
                 * Set user context to the UTS
                 */
                if (!(ku->ku_mflags & KMF_NOUPCALL)) {
                        cpu_set_upcall_kse(td, ku);
                        error = suword(&ku->ku_mailbox->km_curthread, 0);
                        if (error)
                                goto out;
                }

                /*
                 * Unhook the list of completed threads.
                 * Anything that completes after this gets to
                 * come in next time.
                 * Put the list of completed thread mailboxes on
                 * this KSE's mailbox.
                 */
                if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
                    (error = thread_link_mboxes(kg, ku)) != 0)
                        goto out;
        }
        if (!uts_crit) {
                nanotime(&ts);
                error = copyout(&ts, &ku->ku_mailbox->km_timeofday,
                    sizeof(ts));
        }

out:
        if (error) {
                /*
                 * Things are going to be so screwed we should just kill
                 * the process.
                 * How do we do that?
                 */
                PROC_LOCK(td->td_proc);
                psignal(td->td_proc, SIGSEGV);
                PROC_UNLOCK(td->td_proc);
        } else {
                /*
                 * Optimisation:
                 * Ensure that we have a spare thread available,
                 * for when we re-enter the kernel.
                 */
                if (td->td_standin == NULL)
                        thread_alloc_spare(td, NULL);
        }

        ku->ku_mflags = 0;
        /*
         * Clear the thread mailbox first, then clear the system tick
         * count.  The order is important because thread_statclock()
         * uses the mailbox pointer to see if it is a userland thread
         * or a UTS kernel thread.
         */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error); /* go sync */
}
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptable (PCATCH).
 */
int
thread_single(int force_exit)
{
        struct thread *td;
        struct thread *td2;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((td != NULL), ("curthread is NULL"));

        if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
                return (0);

        /* Is someone already single threading? */
        if (p->p_singlethread)
                return (1);

        if (force_exit == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
        } else
                p->p_flag &= ~P_SINGLE_EXIT;
        p->p_flag |= P_STOPPED_SINGLE;
        p->p_singlethread = td;
        /* XXXKSE Which lock protects the below values? */
        while ((p->p_numthreads - p->p_suspcount) != 1) {
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        td2->td_flags |= TDF_ASTPENDING;
                        if (TD_IS_INHIBITED(td2)) {
                                if (force_exit == SINGLE_EXIT) {
                                        if (TD_IS_SUSPENDED(td2)) {
                                                thread_unsuspend_one(td2);
                                        }
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR)) {
                                                if (td2->td_flags & TDF_CVWAITQ)
                                                        cv_abort(td2);
                                                else
                                                        abortsleep(td2);
                                        }
                                } else {
                                        if (TD_IS_SUSPENDED(td2))
                                                continue;
                                        /*
                                         * maybe other inhibited states too?
                                         * XXXKSE Is it totally safe to
                                         * suspend a non-interruptable thread?
                                         */
                                        if (td2->td_inhibitors &
                                            (TDI_SLEEPING | TDI_SWAPPED))
                                                thread_suspend_one(td2);
                                }
                        }
                }
                /*
                 * Maybe we suspended some threads.. was it enough?
                 */
                if ((p->p_numthreads - p->p_suspcount) == 1) {
                        mtx_unlock_spin(&sched_lock);
                        break;
                }

                /*
                 * Wake us up when everyone else has suspended.
                 * In the mean time we suspend as well.
                 */
                thread_suspend_one(td);
                /* XXX If you recursed this is broken. */
                mtx_unlock(&Giant);
                PROC_UNLOCK(p);
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                mtx_lock(&Giant);
                PROC_LOCK(p);
        }
        if (force_exit == SINGLE_EXIT) {
                if (td->td_upcall) {
                        mtx_lock_spin(&sched_lock);
                        upcall_remove(td);
                        mtx_unlock_spin(&sched_lock);
                }
                kse_purge(p, td);
        }
        return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       |  immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        p = td->td_proc;
        kg = td->td_ksegrp;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (P_SHOULDSTOP(p)) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading. Single threader need not stop.
                         * XXX Should be safe to access unlocked
                         * as it can only be set to be true by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if (return_instead)
                        return (1);

                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        while (mtx_owned(&Giant))
                                mtx_unlock(&Giant);
                        if (p->p_flag & P_THREADED)
                                thread_exit();
                        else
                                thr_exit1();
                }

                mtx_assert(&Giant, MA_NOTOWNED);
                /*
                 * When a thread suspends, it just
                 * moves to the process's suspend queue
                 * and stays there.
                 */
                thread_suspend_one(td);
                PROC_UNLOCK(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                PROC_LOCK(p);
        }
        return (0);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        TD_SET_SUSPENDED(td);
        TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
        /*
         * Hack: If we are suspending but are on the sleep queue
         * then we are in msleep or the cv equivalent. We
         * want to look like we have two Inhibitors.
         * May already be set.. doesn't matter.
         */
        if (TD_ON_SLEEPQ(td))
                TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_REMOVE(&p->p_suspended, td, td_runq);
        TD_CLR_SUSPENDED(td);
        p->p_suspcount--;
        setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        if (!P_SHOULDSTOP(p)) {
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
        } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
            (p->p_numthreads == p->p_suspcount)) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request. Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                thread_unsuspend_one(p->p_singlethread);
        }
}

void
thread_single_end(void)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag &= ~P_STOPPED_SINGLE;
        p->p_singlethread = NULL;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process. The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
                mtx_lock_spin(&sched_lock);
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
                mtx_unlock_spin(&sched_lock);
        }
}