kern_thread.c revision 111387
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 111387 2003-02-24 00:49:55Z davidxu $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>
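
/*
 * A rough map of how the KSE structures hang together, as wired up by
 * proc_linkup() and the *_link() routines below:
 *
 *	struct proc
 *	 |- TAILQ p_ksegrps --> struct ksegrp
 *	 |                       |- TAILQ kg_kseq ----> struct kse
 *	 |                       |- TAILQ kg_upcalls -> struct kse_upcall
 *	 |                       `- TAILQ kg_threads -> struct thread
 *	 `- TAILQ p_threads --------------------------> struct thread
 *
 * Every thread belongs to a proc and a ksegrp; a running thread is also
 * bound to a kse, and an upcall (UTS) thread owns a kse_upcall.
 */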

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td);
static int thread_update_sys_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}
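
/*
 * A note on the UMA hooks registered for thread_zone in threadinit()
 * below: per uma(9), the ctor/dtor pair runs on every uma_zalloc()/
 * uma_zfree(), while init/fini run only when an item enters or leaves
 * the zone's cache.  The expensive pmap_new_thread() setup in
 * thread_init() is therefore done once per cached item, not once per
 * thread_alloc():
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);
 *		thread_init(td)		only if the item is brand new
 *		thread_ctor(td)		every time
 *	uma_zfree(thread_zone, td);
 *		thread_dtor(td)		every time
 *		thread_fini(td)		only when UMA releases the item
 */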

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0; /* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}
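
/*
 * upcall_alloc() and upcall_link() are used as a pair: the zone
 * allocation may sleep, so it is done first, and the link is then taken
 * under sched_lock.  kse_create() below is the caller and follows this
 * pattern:
 *
 *	newku = upcall_alloc();
 *	newku->ku_mailbox = ...;	(filled from the userland mailbox)
 *	mtx_lock_spin(&sched_lock);
 *	upcall_link(newku, newkg);
 *	mtx_unlock_spin(&sched_lock);
 */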

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}

/*
 * For a newly created process, link up all the structures and its
 * initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;

	p = td->td_proc;
	/*
	 * Only the UTS may call this syscall, and the current group
	 * must be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));

	kg = td->td_ksegrp;
	/* Serialize removing upcall */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ke = td->td_kse;
	upcall_remove(td);
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_KSES;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) { /* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}
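
/*
 * A hypothetical userland view of kse_release()/kse_exit() (a sketch
 * only; the UTS side is not part of this file).  An idle UTS with
 * nothing to schedule parks itself in the kernel until an event or a
 * timeout, and is then re-entered through an upcall at km_func:
 *
 *	struct timespec ts = { 0, 50000000 };	(arbitrary 50ms poll)
 *	kse_release(&ts);	parks; resumes as an upcall, or returns
 *				an error on bad arguments
 *	kse_exit();		called by the last UTS thread to leave
 *				KSE mode
 */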

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct timespec ts, ts2, ts3, timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	/*
	 * Only the UTS may call this syscall, and the current group
	 * must be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		getnanouptime(&ts);
		timespecadd(&ts, &timeout);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	    (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
		    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
		if (uap->timeout == NULL || error != EWOULDBLOCK)
			return (0);
		getnanouptime(&ts2);
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		PROC_LOCK(p);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}
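
/*
 * Fields of the userland struct kse_mailbox consumed by the syscalls
 * around here (each one is touched somewhere in this file):
 *
 *	km_func		upcall entry point, saved in ku_func
 *	km_stack	upcall stack, copied into ku_stack
 *	km_curthread	current thread mailbox; thread_user_enter() treats
 *			NULL or -1 as "do not unbind"
 *	km_completed	completed thread contexts, see thread_link_mboxes()
 *	km_sigscaught	signals caught for the process, see
 *			thread_signal_add()
 *	km_timeofday	timestamp exported on each upcall by thread_userret()
 *
 * A minimal (hypothetical) userland bootstrap fills in km_func and
 * km_stack and calls kse_create(&mbx, 0) to switch the process into
 * KSE mode.
 */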

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	p->p_flag |= P_KSES;
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* There is a race condition here, but it is cheap. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		ksegrp_link(newkg, p);
		if (p->p_numksegrps >= max_groups_per_proc) {
			ksegrp_unlink(newkg);
			mtx_unlock_spin(&sched_lock);
			return (EPROCLIM);
		}
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than the number of physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency even if userland is not MP safe
		 * and can only run on a single CPU (for early versions of
		 * libpthread, that is true).
		 * In an ideal world, every physical cpu should execute a
		 * thread.  If there are enough KSEs, threads in the kernel
		 * can be executed in parallel on different cpus at full
		 * speed; concurrency in the kernel shouldn't be restricted
		 * by the number of upcalls userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in userland.
		 * The highest performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		upcall_free(newku);
		mtx_unlock_spin(&sched_lock);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	get_mcontext(td, &uc->uc_mcontext);
#endif
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	ret = set_mcontext(td, &uc->uc_mcontext);
#else
	ret = ENOSYS;
#endif
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
#else
	/*
	 * XXX the ia64 kstack allocator is really lame and is at the mercy
	 * of contigmalloc().  This hackery is to pre-construct a whole
	 * pile of thread structures with associated kernel stacks early
	 * in the system startup while contigmalloc() still works.  Once we
	 * have them, keep them.  Sigh.
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);	/* XXX arbitrary */
#endif
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}
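
/*
 * The *_stash() functions above and thread_reap() below implement a
 * deferred-free scheme: an exiting thread cannot free structures it is
 * still running on, so they are parked on the zombie lists under the
 * kse_zombie_lock spin mutex and freed later, e.g. from thread_alloc()
 * or thread_wait(), by a thread that is fully alive.
 */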

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
	uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *td)
{
	uma_zfree(kse_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}
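
/*
 * Shape of the completed-context list built by thread_export_context()
 * below: kg_completed points to a chain of user-space thread mailboxes
 * linked through their tm_next fields, newest first:
 *
 *	kg->kg_completed -> tmbx2.tm_next -> tmbx1.tm_next -> NULL
 *
 * The list head lives in the kernel; the links live in user memory and
 * are written with suword(), hence the retry loop under PROC_LOCK.
 */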

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error, temp;
	ucontext_t uc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	addr = (void *)(&td->td_mailbox->tm_context);
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;

	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp))
		goto bad;

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again anywhere else.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		/* XXXKSE could use atomic CMPXCH here */
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}
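
/*
 * Tick accounting for KSE threads, as wired up below: thread_statclock()
 * only bumps counters (td_uuticks for user mode, td_usticks for kernel
 * mode); folding them into the thread mailbox with fuword()/suword() is
 * deferred to thread_update_usr_ticks()/thread_update_sys_ticks(), which
 * run at safe points on the way back to userland.
 */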

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently always done via ast(). */
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in future, so if the thread mailbox
			 * is NULL, it must be a UTS kernel; don't account
			 * clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export user mode state clock ticks.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	uticks = td->td_uuticks;
	td->td_uuticks = 0;
	if (uticks) {
		addr = (caddr_t)&tmbx->tm_uticks;
		uticks += fuword(addr);
		if (suword(addr, uticks)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	return (0);
}

/*
 * Export kernel mode state clock ticks.
 */
static int
thread_update_sys_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	int sticks;

	if (td->td_mailbox == NULL)
		return (-1);
	if (td->td_usticks == 0)
		return (0);
	addr = (caddr_t)&td->td_mailbox->tm_sticks;
	sticks = fuword(addr);
	sticks += td->td_usticks;
	td->td_usticks = 0;
	if (suword(addr, sticks)) {
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
		return (-2);
	}
	return (0);
}
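
/*
 * Note on thread_exit() below: the dying thread cannot be freed
 * immediately (we are still executing on its stack), so it is recorded
 * in the per-CPU deadthread slot with PCPU_SET(deadthread, td).  A later
 * thread moves it to the zombie list, and thread_reap() eventually frees
 * it for real.
 */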

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		/*
		 * Unlink this thread from its proc and the kseg.
		 * In keeping with the other structs we probably should
		 * have a thread_unlink() that does some of this but it
		 * would only be called from here (I think) so it would
		 * be a waste. (might be useful for proc_fini() as well.)
		 */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and an owner thread exits only when the process is
		 * exiting, an upcall to userland is no longer needed,
		 * so deleting the upcall structure is safe here.
		 * When all threads in a group have exited, all upcalls
		 * in the group should thus be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
		td->td_proc = NULL;
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	cpu_throw();
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait();
 * called with Giant held, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	mtx_lock_spin(&sched_lock);
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSEs; after all the threads
		 * in a group have exited, it is possible that some KSEs
		 * were left on the idle queue. Garbage collect them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
	mtx_unlock_spin(&sched_lock);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	/* Setup PCB and fork address */
	cpu_set_upcall(spare, td->td_pcb);
	/*
	 * XXXKSE do we really need this? (default values for the
	 * frame).
	 */
	bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_UPCALLING;
	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
		td2->td_flags |= TDF_ASTPENDING;
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}
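
/*
 * The td_standin ("spare thread") protocol used above: allocation may
 * sleep, so a spare is attached to the current thread ahead of time with
 * thread_alloc_spare(), outside sched_lock; thread_schedule_upcall()
 * then consumes the spare under sched_lock, where sleeping is not
 * allowed.  Callers that may need to schedule an upcall (kse_create(),
 * thread_user_enter()) top up the spare beforehand.
 */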

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	p = td->td_proc;

	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}


/*
 * Schedule an upcall to notify a KSE process that it has received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;

	kg = td->td_ksegrp;
	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 * XXX p_singlethread not locked, but should be safe.
	 */
	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (kg->kg_numupcalls) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		td->td_mailbox =
		    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((td->td_mailbox == NULL) ||
		    (td->td_mailbox == (void *)-1)) {
			/* Don't schedule upcall when blocked */
			td->td_mailbox = NULL;
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		} else {
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}
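
/*
 * Lifecycle of TDF_CAN_UNBIND, as managed by thread_user_enter() above
 * and thread_userret() below:
 *
 *	kernel entry:	mailbox present	-> set TDF_CAN_UNBIND
 *			mailbox NULL/-1	-> clear it (UTS, stay bound)
 *	kernel exit:	thread_userret() clears it and either returns
 *			straight to userland (nothing completed) or
 *			exports its context and becomes an upcall.
 */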

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Nothing to do with a non-threaded group/process */
	if (td->td_ksegrp->kg_numupcalls == 0)
		return (0);

	/*
	 * A stat clock interrupt hit in userland and we are returning
	 * from it; charge the thread's userland time for the UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
	}

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		mtx_unlock_spin(&sched_lock);
		if ((kg->kg_completed == NULL) &&
		    (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
			thread_update_sys_ticks(td);
			td->td_mailbox = NULL;
			return (0);
		}
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			return (0);
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UPCALLING;
		mtx_unlock_spin(&sched_lock);
	} else if (td->td_mailbox) {
		error = thread_export_context(td);
		if (error) {
			PROC_LOCK(td->td_proc);
			mtx_lock_spin(&sched_lock);
			/* possibly upcall with error? */
		} else {
			PROC_LOCK(td->td_proc);
			mtx_lock_spin(&sched_lock);
			/*
			 * There are upcall threads waiting for
			 * work to do, wake one of them up.
			 * XXXKSE Maybe wake all of them up.
			 */
			if (kg->kg_upsleeps)
				wakeup_one(&kg->kg_completed);
		}
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		while (p->p_numthreads > max_threads_per_proc) {
			if (P_SHOULDSTOP(p))
				break;
			upcalls = 0;
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			mtx_unlock_spin(&sched_lock);
			if (upcalls >= max_threads_per_proc)
				break;
			p->p_maxthrwaits++;
			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL);
			p->p_maxthrwaits--;
		}
		PROC_UNLOCK(p);
	}

	if (td->td_flags & TDF_UPCALLING) {
		ku = td->td_upcall;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		/*
		 * Set user context to the UTS.
		 * Will use Giant in cpu_thread_clean() because it uses
		 * kmem_free(kernel_map, ...)
		 */
		cpu_set_upcall_kse(td, ku);

		/*
		 * Clear TDF_UPCALLING after setting the upcall context;
		 * the profiling code looks at TDF_UPCALLING to avoid
		 * accounting a wrong user %EIP.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL)
			ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		error = thread_link_mboxes(kg, ku);
		if (error)
			goto out;

		/*
		 * Set state and clear the thread mailbox pointer.
		 * From now on we are just a bound outgoing process.
		 * **Problem** userret is often called several times.
		 * It would be nice if this all happened only on the first
		 * time through. (the scan for extra work etc.)
		 */
		error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
		if (error)
			goto out;

		/* Export current system time */
		nanotime(&ts);
		error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
		    sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}
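
/*
 * thread_single() below is the mechanism used by operations that need a
 * quiescent process, such as fork and exit (a sketch of the caller side;
 * the actual call sites live elsewhere):
 *
 *	PROC_LOCK(p);			(Giant is held as well)
 *	if (thread_single(SINGLE_EXIT))
 *		...abort, someone else is already single-threading...
 *	...the process now has exactly one running thread...
 */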

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_KSES) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
	/* XXXKSE Which lock protects the below values? */
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1) {
			mtx_unlock_spin(&sched_lock);
			break;
		}

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		PROC_LOCK(p);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall) {
			mtx_lock_spin(&sched_lock);
			upcall_remove(td);
			mtx_unlock_spin(&sched_lock);
		}
		kse_purge(p, td);
	}
	return (0);
}
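
/*
 * thread_suspend_check() below is the other half of thread_single(); it
 * runs at the user boundary, e.g. from userret(), roughly as:
 *
 *	PROC_LOCK(p);
 *	if (P_SHOULDSTOP(p))
 *		thread_suspend_check(0);	(may suspend or exit here)
 *	PROC_UNLOCK(p);
 *
 * Code that cannot stop passes return_instead = 1 and backs out on a
 * non-zero return.
 */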

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			thread_exit();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount+1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			}
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
		}
		mtx_assert(&Giant, MA_NOTOWNED);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}
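
/*
 * thread_single_end() below undoes thread_single(); the pair brackets
 * the critical operation with the proc lock held across both calls
 * (0 here stands for any non-SINGLE_EXIT argument):
 *
 *	if (thread_single(0) == 0) {
 *		...operate on the now single-threaded process...
 *		thread_single_end();	(let the others run again)
 *	}
 */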

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
		mtx_unlock_spin(&sched_lock);
	}
}