#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

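/*
 * Illustrative note (not in the original source): the handler above is a
 * plain integer sysctl, so the knob can be tuned from userland, e.g.:
 *
 *	sysctl kern.threads.virtual_cpu=4
 *
 * A value of 0 reports the default, which falls back to the real cpu
 * count (mp_ncpus on SMP, 1 otherwise).
 */
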
/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;
	td->td_critnest = 1;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	vm_thread_new(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	vm_thread_dispose(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * Link a KSE into its kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* Link it in now that it's consistent. */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

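/*
 * Illustrative sketch (not in the original source): once proc_linkup() has
 * run, the containment hierarchy (proc -> ksegrp -> kse/thread) can be
 * walked with the iterator macros used elsewhere in this file, e.g.:
 *
 *	struct ksegrp *kg;
 *	struct thread *td2;
 *
 *	FOREACH_KSEGRP_IN_PROC(p, kg)
 *		CTR2(KTR_PROC, "kg %p has %d threads", kg, kg->kg_numthreads);
 *	FOREACH_THREAD_IN_PROC(p, td2)
 *		CTR1(KTR_PROC, "td %p", td2);
 */
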
/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
	int cmd;
	long data;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_SA))
		return (EINVAL);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		/* FALLTHROUGH */
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				mtx_unlock_spin(&sched_lock);
				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
		break;
	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

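/*
 * Illustrative sketch (not in the original source): a UTS could use the
 * syscall above to knock a blocked thread out of an interruptible sleep:
 *
 *	kse_thr_interrupt(tmbx, KSE_INTR_INTERRUPT, 0);
 *
 * which sets TDF_INTERRUPT on the target, arranges td_intrval = EINTR,
 * and aborts the sleep if the thread is sleeping interruptibly.
 */
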
/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	kg = td->td_ksegrp;
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	ke = td->td_kse;
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_SA;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) {	/* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_flags & TDF_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL))
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if (!kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) {
			kg->kg_upsleeps++;
			error = msleep(&kg->kg_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			kg->kg_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

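/*
 * Illustrative note (not in the original source): an idle UTS typically
 * parks in kse_release() and is woken either by kse_wakeup() below or by
 * completed contexts showing up on kg_completed.  The optional timeout is
 * relative (it is run through tvtohz()), so a UTS that wants to poll
 * roughly every 50ms might pass:
 *
 *	struct timespec ts = { 0, 50000000 };
 *	kse_release(&ts);
 */
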
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    ((td2->td_wchan == &kg->kg_completed) ||
		     (td2->td_wchan == &p->p_siglist &&
		      (ku->ku_mflags & KMF_WAITSIGEVENT)))) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations: allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always keep a cpu count handy. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	if (!(mbx.km_flags & KMF_BOUND))
		sa = TDF_SA;
	else
		ncpus = 1;
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA;
	}
	PROC_UNLOCK(p);
	if (!sa && !uap->newgroup && !first)
		return (EINVAL);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* There is a race here, but checking twice keeps it cheap. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
			return (EINVAL);
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group.
		 *
		 * For a multiplexed group, create as many KSEs as there are
		 * physical cpus.  This increases concurrency in the kernel
		 * even if userland is not MP safe and can only run on a
		 * single CPU.  In an ideal world, every physical cpu should
		 * execute a thread.  If there are enough KSEs, threads in
		 * the kernel can be executed in parallel on different cpus
		 * at full speed; kernel concurrency shouldn't be limited by
		 * the number of upcalls userland provides.  Adding more
		 * upcall structures only increases concurrency in userland.
		 *
		 * For a bound thread group, there is only one thread in the
		 * group, so we only create one KSE for it.  A thread in this
		 * kind of group will never schedule an upcall when blocked;
		 * this is intended to simulate a pthread system-scope thread.
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	PROC_LOCK(p);
	if (newkg->kg_numupcalls >= ncpus) {
		PROC_UNLOCK(p);
		upcall_free(newku);
		return (EPROCLIM);
	}
	if (first && sa) {
		SIGSETOR(p->p_siglist, td->td_siglist);
		SIGEMPTYSET(td->td_siglist);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}
	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK(p);
	upcall_link(newku, newkg);
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet, create an
		 * initial upcall thread to own it.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't own an upcall structure,
		 * just assign this upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	if (!sa) {
		newtd->td_mailbox = mbx.km_curthread;
		newtd->td_flags &= ~TDF_SA;
		if (newtd != td) {
			mtx_unlock_spin(&sched_lock);
			cpu_set_upcall_kse(newtd, newku);
			mtx_lock_spin(&sched_lock);
		}
	} else {
		newtd->td_flags |= TDF_SA;
	}
	if (newtd != td)
		setrunqueue(newtd);
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

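/*
 * Illustrative note (not in the original source): in UMA the ctor/dtor
 * pair runs on every uma_zalloc()/uma_zfree(), while init/fini run only
 * when an item moves between the zone and the VM.  So thread_init() and
 * thread_fini() above handle just the type-stable state (such as the
 * kernel stack), while thread_ctor() re-primes per-use fields each time.
 */
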
/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

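/*
 * Illustrative note (not in the original source): freeing is deliberately
 * deferred here because a dying thread cannot free the stack it is still
 * running on.  Structures are parked on the zombie lists via the *_stash()
 * functions and drained later; thread_alloc() calls thread_reap() first,
 * so every allocation also doubles as a garbage-collection pass.
 */
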
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error = 0, temp, sig;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode. */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword32(addr) + td->td_usticks;
	if (suword32(addr, temp)) {
		error = EFAULT;
		goto bad;
	}

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For a sync signal, this is only possible when the signal is not
	 * caught by userland or the process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock.  It's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
		return (0);
	if (user) {
		/* Currently always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in the future, so if the thread
			 * mailbox is NULL it must be a UTS kernel thread;
			 * don't account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export stat clock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	struct ksegrp *kg;
	caddr_t addr;
	int uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	if (user) {
		uticks = td->td_uuticks;
		td->td_uuticks = 0;
		addr = (caddr_t)&tmbx->tm_uticks;
	} else {
		uticks = td->td_usticks;
		td->td_usticks = 0;
		addr = (caddr_t)&tmbx->tm_sticks;
	}
	if (uticks) {
		if (suword32(addr, uticks+fuword32(addr))) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	kg = td->td_ksegrp;
	if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
		mtx_lock_spin(&sched_lock);
		td->td_upcall->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		thread_unlink(td);
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * exiting, an upcall to userland is no longer needed,
		 * so deleting the upcall structure is safe here.
		 * When all threads in a group have exited, all upcalls
		 * in the group should be automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall data",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
static void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership for KSEs; after all the threads
		 * in the group have exited, some KSEs may have been
		 * left on the idle queue.  GC them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall data",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* inherit blocked thread's context */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_SA;
	td2->td_pflags = TDP_UPCALLING;
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
	return (td2);	/* bogus.. should be a void function */
}

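/*
 * Illustrative sketch (not in the original source): thread_schedule_upcall()
 * consumes the spare thread stashed by thread_alloc_spare(), which is why
 * callers make sure td_standin is populated before taking sched_lock, as
 * kse_create() does:
 *
 *	if (td->td_standin == NULL)
 *		thread_alloc_spare(td, NULL);
 *	...
 *	mtx_lock_spin(&sched_lock);
 *	newtd = thread_schedule_upcall(td, newku);
 */
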
/*
 * This is only used when a thread has generated a trap and the process is
 * being debugged.
 */
void
thread_signal_add(struct thread *td, int sig)
{
	struct proc *p;
	siginfo_t siginfo;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	cpu_thread_siginfo(sig, 0, &siginfo);
	mtx_unlock(&ps->ps_mtx);
	PROC_UNLOCK(p);
	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}
	PROC_LOCK(p);
	SIGADDSET(td->td_sigmask, sig);
	mtx_lock(&ps->ps_mtx);
}

void
thread_switchout(struct thread *td)
{
	struct kse_upcall *ku;
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in a threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep.
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
		/*
		 * Release ownership of the upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_flags &= ~TDF_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		setrunqueue(td2);
	}
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t tflags;

	kg = td->td_ksegrp;

	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 */
	if (p->p_flag & P_SINGLE_EXIT) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (td->td_flags & TDF_SA) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
		ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
		    (ku->ku_mflags & KMF_NOUPCALL)) {
			td->td_mailbox = NULL;
		} else {
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			tflags = fuword32(tmbx);
			/*
			 * On some architectures the TP register points to
			 * the thread mailbox but not to the kse mailbox,
			 * and userland can not atomically clear
			 * km_curthread; it can, however, use the TP
			 * register and set TMF_NOUPCALL in the thread
			 * flags to indicate a critical region.
			 */
1611 */ 1612 if (tflags & TMF_NOUPCALL) { 1613 td->td_mailbox = NULL; 1614 } else { 1615 td->td_mailbox = tmbx; 1616 mtx_lock_spin(&sched_lock); 1617 td->td_flags |= TDF_CAN_UNBIND; 1618 mtx_unlock_spin(&sched_lock); 1619 } 1620 } 1621 } 1622} 1623 1624/* 1625 * The extra work we go through if we are a threaded process when we 1626 * return to userland. 1627 * 1628 * If we are a KSE process and returning to user mode, check for 1629 * extra work to do before we return (e.g. for more syscalls 1630 * to complete first). If we were in a critical section, we should 1631 * just return to let it finish. Same if we were in the UTS (in 1632 * which case the mailbox's context's busy indicator will be set). 1633 * The only traps we suport will have set the mailbox. 1634 * We will clear it here. 1635 */ 1636int 1637thread_userret(struct thread *td, struct trapframe *frame) 1638{ 1639 int error = 0, upcalls, uts_crit; 1640 struct kse_upcall *ku; 1641 struct ksegrp *kg, *kg2; 1642 struct proc *p; 1643 struct timespec ts; 1644 1645 p = td->td_proc; 1646 kg = td->td_ksegrp; 1647 ku = td->td_upcall; 1648 1649 /* Nothing to do with bound thread */ 1650 if (!(td->td_flags & TDF_SA)) 1651 return (0); 1652 1653 /* 1654 * Stat clock interrupt hit in userland, it 1655 * is returning from interrupt, charge thread's 1656 * userland time for UTS. 1657 */ 1658 if (td->td_flags & TDF_USTATCLOCK) { 1659 thread_update_usr_ticks(td, 1); 1660 mtx_lock_spin(&sched_lock); 1661 td->td_flags &= ~TDF_USTATCLOCK; 1662 mtx_unlock_spin(&sched_lock); 1663 if (kg->kg_completed || 1664 (td->td_upcall->ku_flags & KUF_DOUPCALL)) 1665 thread_user_enter(p, td); 1666 } 1667 1668 uts_crit = (td->td_mailbox == NULL); 1669 /* 1670 * Optimisation: 1671 * This thread has not started any upcall. 1672 * If there is no work to report other than ourself, 1673 * then it can return direct to userland. 1674 */ 1675 if (TD_CAN_UNBIND(td)) { 1676 mtx_lock_spin(&sched_lock); 1677 td->td_flags &= ~TDF_CAN_UNBIND; 1678 if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && 1679 (kg->kg_completed == NULL) && 1680 (ku->ku_flags & KUF_DOUPCALL) == 0 && 1681 (kg->kg_upquantum && ticks < kg->kg_nextupcall)) { 1682 mtx_unlock_spin(&sched_lock); 1683 thread_update_usr_ticks(td, 0); 1684 nanotime(&ts); 1685 error = copyout(&ts, 1686 (caddr_t)&ku->ku_mailbox->km_timeofday, 1687 sizeof(ts)); 1688 td->td_mailbox = 0; 1689 ku->ku_mflags = 0; 1690 if (error) 1691 goto out; 1692 return (0); 1693 } 1694 mtx_unlock_spin(&sched_lock); 1695 thread_export_context(td, 0); 1696 /* 1697 * There is something to report, and we own an upcall 1698 * strucuture, we can go to userland. 1699 * Turn ourself into an upcall thread. 1700 */ 1701 td->td_pflags |= TDP_UPCALLING; 1702 } else if (td->td_mailbox && (ku == NULL)) { 1703 thread_export_context(td, 1); 1704 PROC_LOCK(p); 1705 /* 1706 * There are upcall threads waiting for 1707 * work to do, wake one of them up. 1708 * XXXKSE Maybe wake all of them up. 
1709 */ 1710 if (kg->kg_upsleeps) 1711 wakeup_one(&kg->kg_completed); 1712 mtx_lock_spin(&sched_lock); 1713 thread_stopped(p); 1714 thread_exit(); 1715 /* NOTREACHED */ 1716 } 1717 1718 KASSERT(ku != NULL, ("upcall is NULL\n")); 1719 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); 1720 1721 if (p->p_numthreads > max_threads_per_proc) { 1722 max_threads_hits++; 1723 PROC_LOCK(p); 1724 mtx_lock_spin(&sched_lock); 1725 p->p_maxthrwaits++; 1726 while (p->p_numthreads > max_threads_per_proc) { 1727 upcalls = 0; 1728 FOREACH_KSEGRP_IN_PROC(p, kg2) { 1729 if (kg2->kg_numupcalls == 0) 1730 upcalls++; 1731 else 1732 upcalls += kg2->kg_numupcalls; 1733 } 1734 if (upcalls >= max_threads_per_proc) 1735 break; 1736 mtx_unlock_spin(&sched_lock); 1737 if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, 1738 "maxthreads", NULL)) { 1739 mtx_lock_spin(&sched_lock); 1740 break; 1741 } else { 1742 mtx_lock_spin(&sched_lock); 1743 } 1744 } 1745 p->p_maxthrwaits--; 1746 mtx_unlock_spin(&sched_lock); 1747 PROC_UNLOCK(p); 1748 } 1749 1750 if (td->td_pflags & TDP_UPCALLING) { 1751 uts_crit = 0; 1752 kg->kg_nextupcall = ticks+kg->kg_upquantum; 1753 /* 1754 * There is no more work to do and we are going to ride 1755 * this thread up to userland as an upcall. 1756 * Do the last parts of the setup needed for the upcall. 1757 */ 1758 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", 1759 td, td->td_proc->p_pid, td->td_proc->p_comm); 1760 1761 td->td_pflags &= ~TDP_UPCALLING; 1762 if (ku->ku_flags & KUF_DOUPCALL) { 1763 mtx_lock_spin(&sched_lock); 1764 ku->ku_flags &= ~KUF_DOUPCALL; 1765 mtx_unlock_spin(&sched_lock); 1766 } 1767 /* 1768 * Set user context to the UTS 1769 */ 1770 if (!(ku->ku_mflags & KMF_NOUPCALL)) { 1771 cpu_set_upcall_kse(td, ku); 1772 error = suword(&ku->ku_mailbox->km_curthread, 0); 1773 if (error) 1774 goto out; 1775 } 1776 1777 /* 1778 * Unhook the list of completed threads. 1779 * anything that completes after this gets to 1780 * come in next time. 1781 * Put the list of completed thread mailboxes on 1782 * this KSE's mailbox. 1783 */ 1784 if (!(ku->ku_mflags & KMF_NOCOMPLETED) && 1785 (error = thread_link_mboxes(kg, ku)) != 0) 1786 goto out; 1787 } 1788 if (!uts_crit) { 1789 nanotime(&ts); 1790 error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts)); 1791 } 1792 1793out: 1794 if (error) { 1795 /* 1796 * Things are going to be so screwed we should just kill 1797 * the process. 1798 * how do we do that? 1799 */ 1800 PROC_LOCK(td->td_proc); 1801 psignal(td->td_proc, SIGSEGV); 1802 PROC_UNLOCK(td->td_proc); 1803 } else { 1804 /* 1805 * Optimisation: 1806 * Ensure that we have a spare thread available, 1807 * for when we re-enter the kernel. 1808 */ 1809 if (td->td_standin == NULL) 1810 thread_alloc_spare(td, NULL); 1811 } 1812 1813 ku->ku_mflags = 0; 1814 /* 1815 * Clear thread mailbox first, then clear system tick count. 1816 * The order is important because thread_statclock() use 1817 * mailbox pointer to see if it is an userland thread or 1818 * an UTS kernel thread. 1819 */ 1820 td->td_mailbox = NULL; 1821 td->td_usticks = 0; 1822 return (error); /* go sync */ 1823} 1824 1825/* 1826 * Enforce single-threading. 1827 * 1828 * Returns 1 if the caller must abort (another thread is waiting to 1829 * exit the process or similar). Process is locked! 1830 * Returns 0 when you are successfully the only thread running. 1831 * A process has successfully single threaded in the suspend mode when 1832 * There are no threads in user mode. 
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptable (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptable thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}