kern_thread.c revision 116440
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 116440 2003-06-16 13:14:52Z davidxu $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	vm_thread_new(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	vm_thread_dispose(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * KSE is linked into kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
	kg->kg_proc = p;
	/*
	 * the following counters are in the -zero- section
	 * and may not need clearing
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = 0;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_SA) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	ke = td->td_kse;
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_SA;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) {	/* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_flags & TDF_SA)
		td->td_pflags |= TDP_UPCALLING;
	PROC_LOCK(p);
	if ((ku->ku_flags & KUF_DOUPCALL) == 0 && (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
		    "kserel", (uap->timeout ? tvtohz(&tv) : 0));
		kg->kg_upsleeps--;
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad, why hasn't kernel always a cpu counter !?
	 */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	if (!(mbx.km_flags & KMF_BOUND))
		sa = TDF_SA;
	else
		ncpus = 1;
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA;
	}
	PROC_UNLOCK(p);
	if (!sa && !uap->newgroup && !first)
		return (EINVAL);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Have race condition but it is cheap */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than the number of physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group
		 *
		 * For a multiplexed group, create as many KSEs as there are
		 * physical cpus. This increases concurrency even if userland
		 * is not MP safe and can only run on a single CPU.
		 * In an ideal world, every physical cpu should execute a
		 * thread. If there are enough KSEs, threads in the kernel
		 * can be executed in parallel on different cpus at full
		 * speed; concurrency in the kernel shouldn't be restricted
		 * by the number of upcalls userland provides. Adding more
		 * upcall structures only increases concurrency in userland.
		 *
		 * For a bound thread group, because there is only one thread
		 * in the group, we only create one KSE for the group. A
		 * thread in this kind of group will never schedule an upcall
		 * when blocked; this is intended to simulate a pthread
		 * system scope thread.
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		upcall_free(newku);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	if (!sa) {
		if (newtd != td)
			cpu_set_upcall_kse(newtd, newku);
		newtd->td_mailbox = mbx.km_curthread;
		newtd->td_flags &= ~TDF_SA;
	} else {
		newtd->td_flags |= TDF_SA;
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */

void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resource.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
	uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *td)
{
	uma_zfree(kse_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox.
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error = 0, temp;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	/* Exports clock ticks in kernel mode */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock
			 * process lock. it's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
		return (0);
	if (user) {
		/* Currently this is always done via ast() */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in future, so if the thread mailbox
			 * is NULL, it must be a UTS kernel thread; don't
			 * account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export stat clock ticks for userland
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword(addr, uticks+fuword(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * the owner thread exits only when the process is in
		 * the exiting state, so an upcall to userland is no
		 * longer needed and deleting the upcall structure is
		 * safe here. So when all threads in a group have exited,
		 * all upcalls in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant held, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Muliple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Muliple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp resource. When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resource. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSE, after all threads
		 * in the group exited, it is possible that some KSEs
		 * were left in idle queue, gc them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_SA;
	td2->td_pflags = TDP_UPCALLING;
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&p->p_sigacts->ps_mtx, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it has received signals.
 *
 */
void
thread_signal_upcall(struct thread *td)
{
	td->td_pflags |= TDP_UPCALLING;

	return;
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread, this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		thread_schedule_upcall(td, ku);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	if (p->p_flag & P_SINGLE_EXIT) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (td->td_flags & TDF_SA) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1)) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			if (ku->ku_mflags & KMF_NOUPCALL)
				td->td_flags &= ~TDF_CAN_UNBIND;
			else
				td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls, uts_crit;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ku = td->td_upcall;

	/* Nothing to do with bound thread */
	if (!(td->td_flags & TDF_SA))
		return (0);

	/*
	 * Stat clock interrupt hit in userland, it
	 * is returning from interrupt, charge thread's
	 * userland time for UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td, 1);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
		if (kg->kg_completed ||
		    (td->td_upcall->ku_flags & KUF_DOUPCALL))
			thread_user_enter(p, td);
	}

	uts_crit = (td->td_mailbox == NULL);
	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			mtx_unlock_spin(&sched_lock);
			thread_update_usr_ticks(td, 0);
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		mtx_unlock_spin(&sched_lock);
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			goto out;
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		error = thread_export_context(td);
		/* possibly upcall with error? */
		PROC_LOCK(p);
		/*
		 * There are upcall threads waiting for
		 * work to do, wake one of them up.
		 * XXXKSE Maybe wake all of them up.
		 */
		if (!error && kg->kg_upsleeps)
			wakeup_one(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(ku != NULL, ("upcall is NULL\n"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL)) {
				mtx_lock_spin(&sched_lock);
				break;
			} else {
				mtx_lock_spin(&sched_lock);
			}
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks+kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set user context to the UTS
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku);
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	ku->ku_mflags = 0;
	/*
	 * Clear the thread mailbox first, then clear the system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading?
	 */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}