kern_thread.c revision 112079
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 112078 2003-03-11 03:16:02Z davidxu $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>
/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
        &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
        &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
        &max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
        &max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
        TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;
        int def_val;

#ifdef SMP
        def_val = mp_ncpus;
#else
        def_val = 1;
#endif
        if (virtual_cpu == 0)
                new_val = def_val;
        else
                new_val = virtual_cpu;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < 0)
                return (EINVAL);
        virtual_cpu = new_val;
        return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
        "debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
}
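/*
 * Note: thread_ctor()/thread_dtor() above run on every allocation and
 * free from thread_zone, while thread_init()/thread_fini() below run
 * only when memory enters or leaves the zone's type-stable pool.  That
 * is why the expensive kernel stack setup (pmap_new_thread()) lives in
 * the init routine and survives across thread_alloc()/thread_free()
 * cycles.
 */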
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        mtx_lock(&Giant);
        pmap_new_thread(td, 0);
        mtx_unlock(&Giant);
        cpu_thread_setup(td);
        td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
        struct kse *ke;

        ke = (struct kse *)mem;
        ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
        struct ksegrp *kg;

        kg = (struct ksegrp *)mem;
        kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
        struct proc *p = kg->kg_proc;

        TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
        kg->kg_kses++;
        ke->ke_state = KES_UNQUEUED;
        ke->ke_proc = p;
        ke->ke_ksegrp = kg;
        ke->ke_thread = NULL;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = ke->ke_ksegrp;
        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
        if (ke->ke_state == KES_IDLE) {
                TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                kg->kg_idle_kses--;
        }
        if (--kg->kg_kses == 0)
                ksegrp_unlink(kg);
        /*
         * Aggregate stats from the KSE.
         */
        kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

        TAILQ_INIT(&kg->kg_threads);
        TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_slpq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_kseq);       /* all kses in ksegrp */
        TAILQ_INIT(&kg->kg_iq);         /* all idle kses in ksegrp */
        TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structures in ksegrp */
        kg->kg_proc = p;
        /*
         * The following counters are in the -zero- section
         * and may not need clearing.
         */
        kg->kg_numthreads = 0;
        kg->kg_runnable = 0;
        kg->kg_kses = 0;
        kg->kg_runq_kses = 0; /* XXXKSE change name */
        kg->kg_idle_kses = 0;
        kg->kg_numupcalls = 0;
        /* Link it in now that it's consistent. */
        p->p_numksegrps++;
        TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
        struct proc *p;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
        KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
        KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

        p = kg->kg_proc;
        TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
        p->p_numksegrps--;
        /*
         * Aggregate stats from the KSE.
         */
        ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
        struct kse_upcall *ku;

        ku = uma_zalloc(upcall_zone, M_WAITOK);
        bzero(ku, sizeof(*ku));
        return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

        uma_zfree(upcall_zone, ku);
}
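/*
 * Note: upcall_zone is created in threadinit() with no ctor or init
 * routine, so upcall_alloc() must bzero() each structure itself; the
 * thread/kse/ksegrp zones instead rely on their init routines and on
 * callers clearing the startzero sections (see kse_create()).
 */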
void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
        ku->ku_ksegrp = kg;
        kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
        struct ksegrp *kg = ku->ku_ksegrp;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
        TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
        kg->kg_numupcalls--;
        upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

        if (td->td_upcall) {
                td->td_upcall->ku_owner = NULL;
                upcall_unlink(td->td_upcall);
                td->td_upcall = 0;
        }
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
            struct kse *ke, struct thread *td)
{

        TAILQ_INIT(&p->p_ksegrps);           /* all ksegrps in proc */
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        TAILQ_INIT(&p->p_suspended);         /* Threads suspended */
        p->p_numksegrps = 0;
        p->p_numthreads = 0;

        ksegrp_link(kg, p);
        kse_link(ke, kg);
        thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
        struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
        struct proc *p;
        struct thread *td2;

        p = td->td_proc;
        if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
                return (EINVAL);
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td2) {
                if (td2->td_mailbox == uap->tmbx) {
                        td2->td_flags |= TDF_INTERRUPT;
                        if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
                                if (td2->td_flags & TDF_CVWAITQ)
                                        cv_abort(td2);
                                else
                                        abortsleep(td2);
                        }
                        mtx_unlock_spin(&sched_lock);
                        return (0);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (ESRCH);
}

/*
struct kse_exit_args {
        register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse *ke;

        p = td->td_proc;
        /*
         * Only the UTS may call this syscall, and the current group
         * must be a threaded group.
         */
        if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
                return (EINVAL);
        KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));

        kg = td->td_ksegrp;
        /* Serialize removing upcall */
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (EDEADLK);
        }
        ke = td->td_kse;
        upcall_remove(td);
        if (p->p_numthreads == 1) {
                kse_purge(p, td);
                p->p_flag &= ~P_THREADED;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
        } else {
                if (kg->kg_numthreads == 1) { /* Shutdown a group */
                        kse_purge_group(td);
                        ke->ke_flags |= KEF_EXIT;
                }
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        return (0);
}
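/*
 * Note that kse_exit() returns (0) only on the path where the last
 * thread converts the process back to an unthreaded one; on the other
 * successful path the calling thread is already gone via thread_exit().
 */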
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall.  Only error cases return.
 */
/*
struct kse_release_args {
        struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct timespec ts, ts2, ts3, timeout;
        struct timeval tv;
        int error;

        p = td->td_proc;
        kg = td->td_ksegrp;
        /*
         * Only the UTS may call this syscall, and the current group
         * must be a threaded group.
         */
        if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
                return (EINVAL);
        KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
        if (uap->timeout != NULL) {
                if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
                        return (error);
                getnanouptime(&ts);
                timespecadd(&ts, &timeout);
                TIMESPEC_TO_TIMEVAL(&tv, &timeout);
        }
        mtx_lock_spin(&sched_lock);
        /* Change OURSELF to become an upcall. */
        td->td_flags = TDF_UPCALLING;
        if (p->p_sflag & PS_NEEDSIGCHK)
                td->td_flags |= TDF_ASTPENDING;
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
        while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
               (kg->kg_completed == NULL)) {
                kg->kg_upsleeps++;
                error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
                    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
                kg->kg_upsleeps--;
                PROC_UNLOCK(p);
                if (uap->timeout == NULL || error != EWOULDBLOCK)
                        return (0);
                getnanouptime(&ts2);
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
                PROC_LOCK(p);
        }
        PROC_UNLOCK(p);
        return (0);
}

/* struct kse_wakeup_args {
        struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse_upcall *ku;
        struct thread *td2;

        p = td->td_proc;
        td2 = NULL;
        ku = NULL;
        /* KSE-enabled processes only, please. */
        if (!(p->p_flag & P_THREADED))
                return (EINVAL);
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if (uap->mbx) {
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        FOREACH_UPCALL_IN_GROUP(kg, ku) {
                                if (ku->ku_mailbox == uap->mbx)
                                        break;
                        }
                        if (ku)
                                break;
                }
        } else {
                kg = td->td_ksegrp;
                if (kg->kg_upsleeps) {
                        wakeup_one(&kg->kg_completed);
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        return (0);
                }
                ku = TAILQ_FIRST(&kg->kg_upcalls);
        }
        if (ku) {
                if ((td2 = ku->ku_owner) == NULL) {
                        panic("%s: no owner", __func__);
                } else if (TD_ON_SLEEPQ(td2) &&
                           (td2->td_wchan == &kg->kg_completed)) {
                        abortsleep(td2);
                } else {
                        ku->ku_flags |= KUF_DOUPCALL;
                }
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (0);
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (ESRCH);
}
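/*
 * kse_wakeup() first tries to wake an upcall thread already sleeping in
 * kse_release() (wakeup_one()/abortsleep()); failing that it sets
 * KUF_DOUPCALL on the chosen upcall so the event is noticed the next
 * time its owner passes through thread_userret().
 */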
/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
        struct kse_mailbox *mbx;
        int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
        struct kse *newke;
        struct ksegrp *newkg;
        struct ksegrp *kg;
        struct proc *p;
        struct kse_mailbox mbx;
        struct kse_upcall *newku;
        int err, ncpus;

        p = td->td_proc;
        if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
                return (err);

        /* Too bad, why doesn't the kernel always have a cpu counter!? */
#ifdef SMP
        ncpus = mp_ncpus;
#else
        ncpus = 1;
#endif
        if (thread_debug && virtual_cpu != 0)
                ncpus = virtual_cpu;

        /* Easier to just set it than to test and set */
        PROC_LOCK(p);
        p->p_flag |= P_THREADED;
        PROC_UNLOCK(p);
        kg = td->td_ksegrp;
        if (uap->newgroup) {
                /* There is a race condition here, but it is cheap. */
                if (p->p_numksegrps >= max_groups_per_proc)
                        return (EPROCLIM);
                /*
                 * If we want a new KSEGRP it doesn't matter whether
                 * we have already fired up KSE mode before or not.
                 * We put the process in KSE mode and create a new KSEGRP.
                 */
                newkg = ksegrp_alloc();
                bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
                      kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
                mtx_lock_spin(&sched_lock);
                if (p->p_numksegrps >= max_groups_per_proc) {
                        mtx_unlock_spin(&sched_lock);
                        ksegrp_free(newkg);
                        return (EPROCLIM);
                }
                ksegrp_link(newkg, p);
                mtx_unlock_spin(&sched_lock);
        } else {
                newkg = kg;
        }

        /*
         * Creating more upcalls than there are physical cpus does
         * not help performance.
         */
        if (newkg->kg_numupcalls >= ncpus)
                return (EPROCLIM);

        if (newkg->kg_numupcalls == 0) {
                /*
                 * Initialize the KSE group, optimized for MP.
                 * Create as many KSEs as there are physical cpus.
                 * This increases concurrency even if userland is not
                 * MP safe and can only run on a single CPU (true for
                 * early versions of libpthread).
                 * In an ideal world, every physical cpu should execute
                 * a thread.  If there are enough KSEs, threads in the
                 * kernel can be executed in parallel on different cpus
                 * at full speed; concurrency in the kernel shouldn't be
                 * restricted by the number of upcalls userland provides.
                 * Adding more upcall structures only increases concurrency
                 * in userland.
                 * The highest performance configuration is:
                 * N kses = N upcalls = N physical cpus
                 */
                while (newkg->kg_kses < ncpus) {
                        newke = kse_alloc();
                        bzero(&newke->ke_startzero, RANGEOF(struct kse,
                              ke_startzero, ke_endzero));
#if 0
                        mtx_lock_spin(&sched_lock);
                        bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
                              RANGEOF(struct kse, ke_startcopy, ke_endcopy));
                        mtx_unlock_spin(&sched_lock);
#endif
                        mtx_lock_spin(&sched_lock);
                        kse_link(newke, newkg);
                        /* Add engine */
                        kse_reassign(newke);
                        mtx_unlock_spin(&sched_lock);
                }
        }
        newku = upcall_alloc();
        newku->ku_mailbox = uap->mbx;
        newku->ku_func = mbx.km_func;
        bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

        /* For the first call this may not have been set */
        if (td->td_standin == NULL)
                thread_alloc_spare(td, NULL);

        mtx_lock_spin(&sched_lock);
        if (newkg->kg_numupcalls >= ncpus) {
                mtx_unlock_spin(&sched_lock);
                upcall_free(newku);
                return (EPROCLIM);
        }
        upcall_link(newku, newkg);

        /*
         * Each upcall structure has an owner thread, find which
         * one owns it.
         */
        if (uap->newgroup) {
                /*
                 * Because the new ksegrp has no thread yet,
                 * create an initial upcall thread to own it.
                 */
                thread_schedule_upcall(td, newku);
        } else {
                /*
                 * If the current thread doesn't have an upcall structure,
                 * just assign the upcall to it.
                 */
                if (td->td_upcall == NULL) {
                        newku->ku_owner = td;
                        td->td_upcall = newku;
                } else {
                        /*
                         * Create a new upcall thread to own it.
                         */
                        thread_schedule_upcall(td, newku);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        get_mcontext(td, &uc->uc_mcontext);
#endif
        uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
        int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        ret = set_mcontext(td, &uc->uc_mcontext);
#else
        ret = ENOSYS;
#endif
        if (ret == 0) {
                SIG_CANTMASK(uc->uc_sigmask);
                PROC_LOCK(td->td_proc);
                td->td_proc->p_sigmask = uc->uc_sigmask;
                PROC_UNLOCK(td->td_proc);
        }
        return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, 0);
#else
        /*
         * XXX the ia64 kstack allocator is really lame and is at the mercy
         * of contigmalloc().  This hackery is to pre-construct a whole
         * pile of thread structures with associated kernel stacks early
         * in the system startup while contigmalloc() still works.  Once we
         * have them, keep them.  Sigh.
         */
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        uma_prealloc(thread_zone, 512); /* XXX arbitrary */
#endif
        ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
            NULL, NULL, ksegrp_init, NULL,
            UMA_ALIGN_CACHE, 0);
        kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
            NULL, NULL, kse_init, NULL,
            UMA_ALIGN_CACHE, 0);
        upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
            NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
        mtx_unlock_spin(&kse_zombie_lock);
}
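/*
 * The stash routines above only queue zombies under a spin lock; the
 * actual freeing is deferred to thread_reap() below, which runs from
 * contexts (e.g. thread_alloc()) where it is safe to free memory.
 */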
/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;
        struct kse *ke_first, *ke_next;
        struct ksegrp *kg_first, *kg_next;
        struct kse_upcall *ku_first, *ku_next;

        /*
         * Don't even bother to lock if none at this instant;
         * we really don't care about the next instant.
         */
        if ((!TAILQ_EMPTY(&zombie_threads))
            || (!TAILQ_EMPTY(&zombie_kses))
            || (!TAILQ_EMPTY(&zombie_ksegrps))
            || (!TAILQ_EMPTY(&zombie_upcalls))) {
                mtx_lock_spin(&kse_zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                ke_first = TAILQ_FIRST(&zombie_kses);
                kg_first = TAILQ_FIRST(&zombie_ksegrps);
                ku_first = TAILQ_FIRST(&zombie_upcalls);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                if (ke_first)
                        TAILQ_INIT(&zombie_kses);
                if (kg_first)
                        TAILQ_INIT(&zombie_ksegrps);
                if (ku_first)
                        TAILQ_INIT(&zombie_upcalls);
                mtx_unlock_spin(&kse_zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_runq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
                while (ke_first) {
                        ke_next = TAILQ_NEXT(ke_first, ke_procq);
                        kse_free(ke_first);
                        ke_first = ke_next;
                }
                while (kg_first) {
                        kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
                        ksegrp_free(kg_first);
                        kg_first = kg_next;
                }
                while (ku_first) {
                        ku_next = TAILQ_NEXT(ku_first, ku_link);
                        upcall_free(ku_first);
                        ku_first = ku_next;
                }
        }
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
        return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
        return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        thread_reap();  /* check if any zombies to get */
        return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
        uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
        uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        cpu_thread_clean(td);
        uma_zfree(thread_zone, td);
}
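/*
 * All of the allocators above use M_WAITOK and may sleep, so they must
 * not be called with sched_lock held; note how kse_create() allocates
 * everything before taking the lock and frees on the failure path only
 * after dropping it.
 */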
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        uintptr_t mbx;
        void *addr;
        int error, temp;
        ucontext_t uc;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Export the user/machine context. */
        addr = (void *)(&td->td_mailbox->tm_context);
        error = copyin(addr, &uc, sizeof(ucontext_t));
        if (error)
                goto bad;

        thread_getcontext(td, &uc);
        error = copyout(&uc, addr, sizeof(ucontext_t));
        if (error)
                goto bad;

        /* Export clock ticks spent in kernel mode. */
        addr = (caddr_t)(&td->td_mailbox->tm_sticks);
        temp = fuword(addr) + td->td_usticks;
        if (suword(addr, temp)) {
                error = EFAULT;
                goto bad;
        }

        addr = (caddr_t)(&td->td_mailbox->tm_slices);
        temp = fuword(addr) - td->td_usticks;
        if (suword(addr, temp)) {
                error = EFAULT;
                goto bad;
        }

        /* Get address in latest mbox of list pointer */
        addr = (void *)(&td->td_mailbox->tm_next);
        /*
         * Put the saved address of the previous first
         * entry into this one.
         */
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        error = EFAULT;
                        goto bad;
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = td->td_mailbox;
                        /*
                         * The thread context may be taken away by
                         * other upcall threads when we unlock the
                         * process lock; it is no longer valid to
                         * use it again anywhere else.
                         */
                        td->td_mailbox = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        td->td_usticks = 0;
        return (0);

bad:
        PROC_LOCK(p);
        psignal(p, SIGSEGV);
        PROC_UNLOCK(p);
        /* The mailbox is bad, don't use it */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
        struct proc *p = kg->kg_proc;
        void *addr;
        uintptr_t mbx;

        addr = (void *)(&ku->ku_mailbox->km_completed);
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (EFAULT);
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        return (0);
}
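/*
 * The suword()/re-check loops in thread_export_context() and
 * thread_link_mboxes() implement a lock-free push onto a user-visible
 * singly linked list: store the current head into the new element's
 * next pointer with no lock held (suword() may sleep on a page fault),
 * then take the proc lock and only swing the head if it has not changed
 * in the meantime; otherwise retry.
 */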
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
        struct thread *td = curthread;

        if (td->td_ksegrp->kg_numupcalls == 0)
                return (-1);
        if (user) {
                /* Currently always done via ast() */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
                mtx_unlock_spin(&sched_lock);
                td->td_uuticks++;
        } else {
                if (td->td_mailbox != NULL)
                        td->td_usticks++;
                else {
                        /* XXXKSE
                         * We will call thread_user_enter() for every
                         * kernel entry in the future, so if the thread
                         * mailbox is NULL, it must be a UTS kernel;
                         * don't account clock ticks for it.
                         */
                }
        }
        return (0);
}

/*
 * Export stat clock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
        struct proc *p = td->td_proc;
        struct kse_thr_mailbox *tmbx;
        struct kse_upcall *ku;
        caddr_t addr;
        uint uticks;
        int slices;

        if ((ku = td->td_upcall) == NULL)
                return (-1);

        tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
        if ((tmbx == NULL) || (tmbx == (void *)-1))
                return (-1);
        if (user) {
                uticks = td->td_uuticks;
                td->td_uuticks = 0;
                addr = (caddr_t)&tmbx->tm_uticks;
        } else {
                uticks = td->td_usticks;
                td->td_usticks = 0;
                addr = (caddr_t)&tmbx->tm_sticks;
        }
        if (uticks) {
                if (suword(addr, uticks + fuword(addr))) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (-2);
                }
                addr = (caddr_t)&tmbx->tm_slices;
                slices = (int)fuword(addr);
                if (slices > 0) {
                        slices -= (int)uticks;
                        if (suword(addr, slices)) {
                                PROC_LOCK(p);
                                psignal(p, SIGSEGV);
                                PROC_UNLOCK(p);
                                return (-2);
                        }
                        if (slices <= 0) {
                                mtx_lock_spin(&sched_lock);
                                td->td_upcall->ku_flags |= KUF_DOUPCALL;
                                mtx_unlock_spin(&sched_lock);
                        }
                }
        }
        return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        struct thread *td;
        struct kse *ke;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        kg = td->td_ksegrp;
        p = td->td_proc;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        KASSERT(ke != NULL, ("thread exiting without a kse"));
        KASSERT(kg != NULL, ("thread exiting without a kse group"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR1(KTR_PROC, "thread_exit: thread %p", td);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        if (td->td_standin != NULL) {
                thread_stash(td->td_standin);
                td->td_standin = NULL;
        }

        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled.  Skip
         * all this stuff.
         */
        if (p->p_numthreads > 1) {
                /*
                 * Unlink this thread from its proc and the kseg.
                 * In keeping with the other structs we probably should
                 * have a thread_unlink() that does some of this but it
                 * would only be called from here (I think) so it would
                 * be a waste.  (might be useful for proc_fini() as well.)
                 */
                TAILQ_REMOVE(&p->p_threads, td, td_plist);
                p->p_numthreads--;
                TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
                kg->kg_numthreads--;
                if (p->p_maxthrwaits)
                        wakeup(&p->p_numthreads);
                /*
                 * The test below is NOT true if we are the
                 * sole exiting thread.  P_STOPPED_SINGLE is unset
                 * in exit1() after it is the only survivor.
                 */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }

                /*
                 * Because each upcall structure has an owner thread,
                 * and owner threads exit only when the process is
                 * exiting, an upcall to userland is no longer needed
                 * and deleting the upcall structure is safe here.
                 * So when all threads in a group have exited, all
                 * upcalls in the group should be automatically freed.
                 */
                if (td->td_upcall)
                        upcall_remove(td);

                ke->ke_state = KES_UNQUEUED;
                ke->ke_thread = NULL;
                /*
                 * Decide what to do with the KSE attached to this thread.
                 */
                if (ke->ke_flags & KEF_EXIT)
                        kse_unlink(ke);
                else
                        kse_reassign(ke);
                PROC_UNLOCK(p);
                td->td_kse = NULL;
                td->td_state = TDS_INACTIVE;
                td->td_proc = NULL;
                td->td_ksegrp = NULL;
                td->td_last_kse = NULL;
                PCPU_SET(deadthread, td);
        } else {
                PROC_UNLOCK(p);
        }
        cpu_throw();
        /* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
        KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_standin != NULL) {
                        thread_free(td->td_standin);
                        td->td_standin = NULL;
                }
                cpu_thread_clean(td);
        }
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
        struct proc *p;

        p = kg->kg_proc;
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_ksegrp = kg;
        td->td_last_kse = NULL;
        td->td_flags = 0;
        td->td_kse = NULL;

        LIST_INIT(&td->td_contested);
        callout_init(&td->td_slpcallout, 1);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
        p->p_numthreads++;
        kg->kg_numthreads++;
}

/*
 * Purge a ksegrp's resources.  When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        kg = td->td_ksegrp;
        KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
        while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                KASSERT(ke->ke_state == KES_IDLE,
                    ("%s: wrong idle KSE state", __func__));
                kse_unlink(ke);
        }
        KASSERT((kg->kg_kses == 1),
            ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
        KASSERT((kg->kg_numupcalls == 0),
            ("%s: ksegrp still has %d upcall datas",
            __func__, kg->kg_numupcalls));
}
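/*
 * kse_purge_group() handles a single exiting ksegrp and leaves its last
 * KSE for thread_exit() to release via KEF_EXIT (see kse_exit()), while
 * kse_purge() below runs once the whole process is back to a single
 * thread and recycles every group except the caller's.
 */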
/*
 * Purge a process's KSE resources.  When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        KASSERT(p->p_numthreads == 1, ("bad thread number"));
        mtx_lock_spin(&sched_lock);
        while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
                TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
                p->p_numksegrps--;
                /*
                 * There is no ownership for KSEs; after all the threads
                 * in a group have exited, some KSEs may have been left
                 * on the idle queue.  Garbage-collect them now.
                 */
                while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                        KASSERT(ke->ke_state == KES_IDLE,
                            ("%s: wrong idle KSE state", __func__));
                        TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                        kg->kg_idle_kses--;
                        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
                        kg->kg_kses--;
                        kse_stash(ke);
                }
                KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
                        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
                    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
                KASSERT((kg->kg_numupcalls == 0),
                    ("%s: ksegrp still has %d upcall datas",
                    __func__, kg->kg_numupcalls));

                if (kg != td->td_ksegrp)
                        ksegrp_stash(kg);
        }
        TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
        p->p_numksegrps++;
        mtx_unlock_spin(&sched_lock);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall.  Initialize the thread's large data area outside
 * sched_lock for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
        if (td->td_standin)
                return;
        if (spare == NULL)
                spare = thread_alloc();
        td->td_standin = spare;
        bzero(&spare->td_startzero,
            (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
        spare->td_proc = td->td_proc;
        /* Setup PCB and fork address */
        cpu_set_upcall(spare, td->td_pcb);
        /*
         * XXXKSE do we really need this? (default values for the
         * frame).
         */
        bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
        spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
        struct thread *td2;

        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * Schedule an upcall thread on the specified kse_upcall;
         * the kse_upcall must be free.
         * td must have a spare thread.
         */
        KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
        if ((td2 = td->td_standin) != NULL) {
                td->td_standin = NULL;
        } else {
                panic("no reserve thread when scheduling an upcall");
                return (NULL);
        }
        CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
            td2, td->td_proc->p_pid, td->td_proc->p_comm);
        bcopy(&td->td_startcopy, &td2->td_startcopy,
            (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
        thread_link(td2, ku->ku_ksegrp);
        /* Let the new thread become owner of the upcall. */
        ku->ku_owner = td2;
        td2->td_upcall = ku;
        td2->td_flags = TDF_UPCALLING;
        if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
                td2->td_flags |= TDF_ASTPENDING;
        td2->td_kse = NULL;
        td2->td_state = TDS_CAN_RUN;
        td2->td_inhibitors = 0;
        setrunqueue(td2);
        return (td2);   /* bogus.. should be a void function */
}
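/*
 * thread_schedule_upcall() deliberately consumes td->td_standin instead
 * of allocating: it runs under sched_lock, where the M_WAITOK allocators
 * above must not be used, so callers pre-stock the spare thread with
 * thread_alloc_spare() before taking the lock.
 */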
void
thread_signal_add(struct thread *td, int sig)
{
        struct kse_upcall *ku;
        struct proc *p;
        sigset_t ss;
        int error;

        PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
        td = curthread;
        ku = td->td_upcall;
        p = td->td_proc;

        PROC_UNLOCK(p);
        error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
        if (error)
                goto error;

        SIGADDSET(ss, sig);

        error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
        if (error)
                goto error;

        PROC_LOCK(p);
        return;
error:
        PROC_LOCK(p);
        sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
        mtx_lock_spin(&sched_lock);
        td->td_flags |= TDF_UPCALLING;
        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
        struct ksegrp *kg;
        struct kse_upcall *ku;

        kg = td->td_ksegrp;
        /*
         * First check that we shouldn't just abort.
         * But check if we are the single thread first!
         * XXX p_singlethread not locked, but should be safe.
         */
        if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }

        /*
         * If we are doing a syscall in a KSE environment,
         * note where our mailbox is.  There is always the
         * possibility that we could do this lazily (in kse_reassign()),
         * but for now do it every time.
         */
        kg = td->td_ksegrp;
        if (kg->kg_numupcalls) {
                ku = td->td_upcall;
                KASSERT(ku, ("%s: no upcall owned", __func__));
                KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
                td->td_mailbox =
                    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
                if ((td->td_mailbox == NULL) ||
                    (td->td_mailbox == (void *)-1)) {
                        /* Don't schedule upcall when blocked */
                        td->td_mailbox = NULL;
                        mtx_lock_spin(&sched_lock);
                        td->td_flags &= ~TDF_CAN_UNBIND;
                        mtx_unlock_spin(&sched_lock);
                } else {
                        if (td->td_standin == NULL)
                                thread_alloc_spare(td, NULL);
                        mtx_lock_spin(&sched_lock);
                        td->td_flags |= TDF_CAN_UNBIND;
                        mtx_unlock_spin(&sched_lock);
                }
        }
}
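/*
 * km_curthread is NULL while the UTS itself is running on this upcall,
 * and -1 is fuword()'s error return; in both cases the thread stays
 * bound.  Otherwise the mailbox is recorded and TDF_CAN_UNBIND is set,
 * so the syscall path may unbind this thread from its KSE if it blocks.
 */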
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
        int error = 0, upcalls;
        struct kse_upcall *ku;
        struct ksegrp *kg, *kg2;
        struct proc *p;
        struct timespec ts;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Nothing to do with non-threaded group/process */
        if (td->td_ksegrp->kg_numupcalls == 0)
                return (0);

        /*
         * Stat clock interrupt hit in userland; it
         * is returning from interrupt, charge the thread's
         * userland time for the UTS.
         */
        if (td->td_flags & TDF_USTATCLOCK) {
                thread_update_usr_ticks(td, 1);
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_USTATCLOCK;
                mtx_unlock_spin(&sched_lock);
                if (kg->kg_completed ||
                    (td->td_upcall->ku_flags & KUF_DOUPCALL))
                        thread_user_enter(p, td);
        }

        /*
         * Optimisation:
         * This thread has not started any upcall.
         * If there is no work to report other than ourself,
         * then it can return direct to userland.
         */
        if (TD_CAN_UNBIND(td)) {
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_CAN_UNBIND;
                mtx_unlock_spin(&sched_lock);
                if ((p->p_flag & PS_NEEDSIGCHK) == 0 &&
                    (kg->kg_completed == NULL) &&
                    (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
                        thread_update_usr_ticks(td, 0);
                        td->td_mailbox = 0;
                        return (0);
                }
                error = thread_export_context(td);
                if (error) {
                        /*
                         * Failing to do the KSE operation just defaults
                         * back to synchronous operation, so just return from
                         * the syscall.
                         */
                        return (0);
                }
                /*
                 * There is something to report, and we own an upcall
                 * structure; we can go to userland.
                 * Turn ourself into an upcall thread.
                 */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= TDF_UPCALLING;
                mtx_unlock_spin(&sched_lock);
        } else if (td->td_mailbox) {
                error = thread_export_context(td);
                /* possibly upcall with error? */
                PROC_LOCK(p);
                /*
                 * There are upcall threads waiting for
                 * work to do; wake one of them up.
                 * XXXKSE Maybe wake all of them up.
                 */
                if (!error && kg->kg_upsleeps)
                        wakeup_one(&kg->kg_completed);
                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }

        KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

        if (p->p_numthreads > max_threads_per_proc) {
                max_threads_hits++;
                PROC_LOCK(p);
                while (p->p_numthreads > max_threads_per_proc) {
                        if (P_SHOULDSTOP(p))
                                break;
                        upcalls = 0;
                        mtx_lock_spin(&sched_lock);
                        FOREACH_KSEGRP_IN_PROC(p, kg2) {
                                if (kg2->kg_numupcalls == 0)
                                        upcalls++;
                                else
                                        upcalls += kg2->kg_numupcalls;
                        }
                        mtx_unlock_spin(&sched_lock);
                        if (upcalls >= max_threads_per_proc)
                                break;
                        p->p_maxthrwaits++;
                        msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
                            "maxthreads", NULL);
                        p->p_maxthrwaits--;
                }
                PROC_UNLOCK(p);
        }
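        /*
         * Note: the loop above only throttles; a thread over the limit
         * sleeps on p->p_numthreads (woken from thread_exit()) unless
         * the process is stopping or userland holds fewer upcalls than
         * the limit.
         */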
        if (td->td_flags & TDF_UPCALLING) {
                ku = td->td_upcall;
                /*
                 * There is no more work to do and we are going to ride
                 * this thread up to userland as an upcall.
                 * Do the last parts of the setup needed for the upcall.
                 */
                CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
                    td, td->td_proc->p_pid, td->td_proc->p_comm);

                /*
                 * Set user context to the UTS.
                 * Will use Giant in cpu_thread_clean() because it uses
                 * kmem_free(kernel_map, ...)
                 */
                cpu_set_upcall_kse(td, ku);
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_UPCALLING;
                if (ku->ku_flags & KUF_DOUPCALL)
                        ku->ku_flags &= ~KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);

                /*
                 * Unhook the list of completed threads.
                 * Anything that completes after this gets to
                 * come in next time.
                 * Put the list of completed thread mailboxes on
                 * this KSE's mailbox.
                 */
                error = thread_link_mboxes(kg, ku);
                if (error)
                        goto out;

                /*
                 * Set state and clear the thread mailbox pointer.
                 * From now on we are just a bound outgoing process.
                 * **Problem** userret is often called several times.
                 * It would be nice if this all happened only on the first
                 * time through.  (the scan for extra work etc.)
                 */
                error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
                if (error)
                        goto out;

                /* Export current system time */
                nanotime(&ts);
                error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
                    sizeof(ts));
        }

out:
        if (error) {
                /*
                 * Things are going to be so screwed we should just kill
                 * the process.
                 * How do we do that?
                 */
                PROC_LOCK(td->td_proc);
                psignal(td->td_proc, SIGSEGV);
                PROC_UNLOCK(td->td_proc);
        } else {
                /*
                 * Optimisation:
                 * Ensure that we have a spare thread available,
                 * for when we re-enter the kernel.
                 */
                if (td->td_standin == NULL)
                        thread_alloc_spare(td, NULL);
        }

        /*
         * Clear the thread mailbox first, then clear the system tick count.
         * The order is important because thread_statclock() uses the
         * mailbox pointer to see if it is a userland thread or
         * a UTS kernel thread.
         */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error); /* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
        struct thread *td;
        struct thread *td2;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        mtx_assert(&Giant, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((td != NULL), ("curthread is NULL"));

        if ((p->p_flag & P_THREADED) == 0)
                return (0);

        /* Is someone already single threading? */
        if (p->p_singlethread)
                return (1);

        if (force_exit == SINGLE_EXIT) {
                p->p_flag |= P_SINGLE_EXIT;
        } else
                p->p_flag &= ~P_SINGLE_EXIT;
        p->p_flag |= P_STOPPED_SINGLE;
        p->p_singlethread = td;
        /* XXXKSE Which lock protects the below values? */
        while ((p->p_numthreads - p->p_suspcount) != 1) {
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        td->td_flags |= TDF_ASTPENDING;
                        if (TD_IS_INHIBITED(td2)) {
                                if (force_exit == SINGLE_EXIT) {
                                        if (TD_IS_SUSPENDED(td2)) {
                                                thread_unsuspend_one(td2);
                                        }
                                        if (TD_ON_SLEEPQ(td2) &&
                                            (td2->td_flags & TDF_SINTR)) {
                                                if (td2->td_flags & TDF_CVWAITQ)
                                                        cv_abort(td2);
                                                else
                                                        abortsleep(td2);
                                        }
                                } else {
                                        if (TD_IS_SUSPENDED(td2))
                                                continue;
                                        /*
                                         * Maybe other inhibited states too?
                                         * XXXKSE Is it totally safe to
                                         * suspend a non-interruptible thread?
                                         */
                                        if (td2->td_inhibitors &
                                            (TDI_SLEEPING | TDI_SWAPPED))
                                                thread_suspend_one(td2);
                                }
                        }
                }
                /*
                 * Maybe we suspended some threads.. was it enough?
                 */
                if ((p->p_numthreads - p->p_suspcount) == 1) {
                        mtx_unlock_spin(&sched_lock);
                        break;
                }

                /*
                 * Wake us up when everyone else has suspended.
                 * In the meantime we suspend as well.
                 */
                thread_suspend_one(td);
                mtx_unlock(&Giant);
                PROC_UNLOCK(p);
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                mtx_lock(&Giant);
                PROC_LOCK(p);
        }
        if (force_exit == SINGLE_EXIT) {
                if (td->td_upcall) {
                        mtx_lock_spin(&sched_lock);
                        upcall_remove(td);
                        mtx_unlock_spin(&sched_lock);
                }
                kse_purge(p, td);
        }
        return (0);
}
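/*
 * The invariant behind the loop above: thread_single() returns 0 only
 * once p->p_numthreads - p->p_suspcount == 1, i.e. every other thread
 * sits on p_suspended.  In SINGLE_EXIT mode interruptible sleeps are
 * aborted as well, so the remaining threads reach the user boundary
 * (and thread_suspend_check() below) quickly.
 */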
/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        p = td->td_proc;
        kg = td->td_ksegrp;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (P_SHOULDSTOP(p)) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading.  Single threader need not stop.
                         * XXX Should be safe to access unlocked
                         * as it can only be set to be true by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if (return_instead)
                        return (1);

                mtx_lock_spin(&sched_lock);
                thread_stopped(p);
                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        while (mtx_owned(&Giant))
                                mtx_unlock(&Giant);
                        thread_exit();
                }

                /*
                 * When a thread suspends, it just
                 * moves to the process's suspend queue
                 * and stays there.
                 */
                mtx_assert(&Giant, MA_NOTOWNED);
                thread_suspend_one(td);
                PROC_UNLOCK(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                PROC_LOCK(p);
        }
        return (0);
}

void
thread_suspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
        p->p_suspcount++;
        TD_SET_SUSPENDED(td);
        TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
        /*
         * Hack: If we are suspending but are on the sleep queue
         * then we are in msleep or the cv equivalent.  We
         * want to look like we have two Inhibitors.
         * May already be set.. doesn't matter.
         */
        if (TD_ON_SLEEPQ(td))
                TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_REMOVE(&p->p_suspended, td, td_runq);
        TD_CLR_SUSPENDED(td);
        p->p_suspcount--;
        setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        if (!P_SHOULDSTOP(p)) {
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
        } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
                   (p->p_numthreads == p->p_suspcount)) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request.  Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                thread_unsuspend_one(p->p_singlethread);
        }
}

void
thread_single_end(void)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag &= ~P_STOPPED_SINGLE;
        p->p_singlethread = NULL;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process.  The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
                mtx_lock_spin(&sched_lock);
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
                mtx_unlock_spin(&sched_lock);
        }
}