kern_thread.c (170292) kern_thread.c (170296)
1/*-
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 13 unchanged lines hidden ---

22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29#include <sys/cdefs.h>
1/*-
2 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 13 unchanged lines hidden (view full) ---

22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 170292 2007-06-04 21:45:18Z attilio $");
30__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 170296 2007-06-04 23:52:24Z jeff $");
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/resourcevar.h>

--- 26 unchanged lines hidden ---

65SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
66 &max_threads_hits, 0, "");
67
68#ifdef KSE
69int virtual_cpu;
70
71#endif
72TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/kernel.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/resourcevar.h>

--- 26 unchanged lines hidden (view full) ---

65SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
66 &max_threads_hits, 0, "");
67
68#ifdef KSE
69int virtual_cpu;
70
71#endif
72TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
73struct mtx kse_zombie_lock;
74MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
73struct mtx zombie_lock;
74MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
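MTX_SYSINIT above arranges for the renamed lock to be initialized from a SYSINIT during boot, so it is usable without an explicit mtx_init() call at run time. A rough sketch of what the macro amounts to (an assumption recalled from sys/mutex.h, not text in this diff; the SYSINIT subsystem and order shown are illustrative):

        static void
        zombie_lock_sysinit(void *arg __unused)
        {

                mtx_init(&zombie_lock, "zombie lock", NULL, MTX_SPIN);
        }
        SYSINIT(zombie_lock_mtx, SI_SUB_LOCK, SI_ORDER_MIDDLE,
            zombie_lock_sysinit, NULL);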
75
76#ifdef KSE
77static int
78sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
79{
80 int error, new_val;
81 int def_val;
82

--- 33 unchanged lines hidden (view full) ---

116 td->td_oncpu = NOCPU;
117
118 td->td_tid = alloc_unr(tid_unrhdr);
119 td->td_syscalls = 0;
120
121 /*
122 * Note that td_critnest begins life as 1 because the thread is not
123 * running and is thereby implicitly waiting to be on the receiving
75
76#ifdef KSE
77static int
78sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
79{
80 int error, new_val;
81 int def_val;
82

--- 33 unchanged lines hidden (view full) ---

116 td->td_oncpu = NOCPU;
117
118 td->td_tid = alloc_unr(tid_unrhdr);
119 td->td_syscalls = 0;
120
121 /*
122 * Note that td_critnest begins life as 1 because the thread is not
123 * running and is thereby implicitly waiting to be on the receiving
124 * end of a context switch. A context switch must occur inside a
125 * critical section, and in fact, includes hand-off of the sched_lock.
126 * After a context switch to a newly created thread, it will release
127 * sched_lock for the first time, and its td_critnest will hit 0 for
128 * the first time. This happens on the far end of a context switch,
129 * and when it context switches away from itself, it will in fact go
130 * back into a critical section, and hand off the sched lock to the
131 * next thread.
124 * end of a context switch.
132 */
133 td->td_critnest = 1;
134
135#ifdef AUDIT
136 audit_thread_alloc(td);
137#endif
138 umtx_thread_alloc(td);
139 return (0);

--- 77 unchanged lines hidden ---

217 * called from:
218 * {arch}/{arch}/machdep.c ia64_init(), init386() etc.
219 * proc_dtor() (should go away)
220 * proc_init()
221 */
222void
223proc_linkup(struct proc *p, struct thread *td)
224{
125 */
126 td->td_critnest = 1;
127
128#ifdef AUDIT
129 audit_thread_alloc(td);
130#endif
131 umtx_thread_alloc(td);
132 return (0);

--- 77 unchanged lines hidden ---
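The shortened comment above still rests on the same invariant: a context switch always runs inside a critical section, so a newly initialized thread starts with td_critnest at 1 and only reaches 0 after it has been switched to for the first time. A minimal sketch of the nesting counter in ordinary use (generic kernel code, not taken from this file):

        critical_enter();               /* curthread->td_critnest: 0 -> 1 */
        /* this code cannot be preempted on this CPU */
        critical_exit();                /* 1 -> 0; deferred preemption may run */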

210 * called from:
211 * {arch}/{arch}/machdep.c ia64_init(), init386() etc.
212 * proc_dtor() (should go away)
213 * proc_init()
214 */
215void
216proc_linkup(struct proc *p, struct thread *td)
217{
218
225 TAILQ_INIT(&p->p_threads); /* all threads in proc */
226 TAILQ_INIT(&p->p_upcalls); /* upcall list */
227 sigqueue_init(&p->p_sigqueue, p);
228 p->p_ksi = ksiginfo_alloc(1);
229 if (p->p_ksi != NULL) {
230 /* XXX p_ksi may be null if ksiginfo zone is not ready */
231 p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
232 }

--- 22 unchanged lines hidden ---

255
256/*
257 * Stash an embarrassingly extra thread into the zombie thread queue.
258 * Use the slpq as that must be unused by now.
259 */
260void
261thread_stash(struct thread *td)
262{
219 TAILQ_INIT(&p->p_threads); /* all threads in proc */
220 TAILQ_INIT(&p->p_upcalls); /* upcall list */
221 sigqueue_init(&p->p_sigqueue, p);
222 p->p_ksi = ksiginfo_alloc(1);
223 if (p->p_ksi != NULL) {
224 /* XXX p_ksi may be null if ksiginfo zone is not ready */
225 p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
226 }

--- 22 unchanged lines hidden ---

249
250/*
251 * Stash an embarrassingly extra thread into the zombie thread queue.
252 * Use the slpq as that must be unused by now.
253 */
254void
255thread_stash(struct thread *td)
256{
263 mtx_lock_spin(&kse_zombie_lock);
257 mtx_lock_spin(&zombie_lock);
264 TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
258 TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
265 mtx_unlock_spin(&kse_zombie_lock);
259 mtx_unlock_spin(&zombie_lock);
266}
267
268/*
269 * Reap zombie kse resource.
270 */
271void
272thread_reap(void)
273{
274 struct thread *td_first, *td_next;
275
276 /*
277 * Don't even bother to lock if none at this instant,
278 * we really don't care about the next instant..
279 */
280 if (!TAILQ_EMPTY(&zombie_threads)) {
260}
261
262/*
263 * Reap zombie kse resource.
264 */
265void
266thread_reap(void)
267{
268 struct thread *td_first, *td_next;
269
270 /*
271 * Don't even bother to lock if none at this instant,
272 * we really don't care about the next instant..
273 */
274 if (!TAILQ_EMPTY(&zombie_threads)) {
281 mtx_lock_spin(&kse_zombie_lock);
275 mtx_lock_spin(&zombie_lock);
282 td_first = TAILQ_FIRST(&zombie_threads);
283 if (td_first)
284 TAILQ_INIT(&zombie_threads);
276 td_first = TAILQ_FIRST(&zombie_threads);
277 if (td_first)
278 TAILQ_INIT(&zombie_threads);
285 mtx_unlock_spin(&kse_zombie_lock);
279 mtx_unlock_spin(&zombie_lock);
286 while (td_first) {
287 td_next = TAILQ_NEXT(td_first, td_slpq);
288 if (td_first->td_ucred)
289 crfree(td_first->td_ucred);
290 thread_free(td_first);
291 td_first = td_next;
292 }
293 }

--- 59 unchanged lines hidden ---

353 uint64_t new_switchtime;
354 struct thread *td;
355 struct thread *td2;
356 struct proc *p;
357
358 td = curthread;
359 p = td->td_proc;
360
280 while (td_first) {
281 td_next = TAILQ_NEXT(td_first, td_slpq);
282 if (td_first->td_ucred)
283 crfree(td_first->td_ucred);
284 thread_free(td_first);
285 td_first = td_next;
286 }
287 }

--- 59 unchanged lines hidden ---
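thread_reap() above follows a common idiom: the unlocked TAILQ_EMPTY() test is only an optimization (a thread stashed just after the test is simply picked up on the next call), and the whole list is detached under the spin lock so the frees run with the lock dropped. The same shape with hypothetical names (worklist, work_lock, w_link, destroy), for illustration only:

        struct work *first, *next;              /* hypothetical item type */

        if (!TAILQ_EMPTY(&worklist)) {          /* unlocked hint only */
                mtx_lock_spin(&work_lock);
                first = TAILQ_FIRST(&worklist);
                if (first != NULL)
                        TAILQ_INIT(&worklist);  /* detach every entry */
                mtx_unlock_spin(&work_lock);
                while (first != NULL) {         /* free with the lock dropped */
                        next = TAILQ_NEXT(first, w_link);
                        destroy(first);
                        first = next;
                }
        }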

347 uint64_t new_switchtime;
348 struct thread *td;
349 struct thread *td2;
350 struct proc *p;
351
352 td = curthread;
353 p = td->td_proc;
354
361 mtx_assert(&sched_lock, MA_OWNED);
355 PROC_SLOCK_ASSERT(p, MA_OWNED);
362 mtx_assert(&Giant, MA_NOTOWNED);
356 mtx_assert(&Giant, MA_NOTOWNED);
357
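The assertion swap above is the recurring theme of this revision: code that leaned on the global sched_lock spin mutex now asserts, or takes, the per-process spin lock and the per-thread lock instead. A schematic contrast, where update_sched_state() is a hypothetical placeholder for whatever scheduling state is being touched:

        /* Old pattern: one global spin lock covers all scheduler state. */
        mtx_lock_spin(&sched_lock);
        update_sched_state(td);
        mtx_unlock_spin(&sched_lock);

        /* New pattern: lock only the objects actually touched. */
        PROC_SLOCK(p);                  /* per-process scheduling fields */
        thread_lock(td);                /* this thread's scheduling state */
        update_sched_state(td);
        thread_unlock(td);
        PROC_SUNLOCK(p);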
363 PROC_LOCK_ASSERT(p, MA_OWNED);
364 KASSERT(p != NULL, ("thread exiting without a process"));
365 CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
366 (long)p->p_pid, p->p_comm);
367 KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
368
369#ifdef AUDIT
370 AUDIT_SYSCALL_EXIT(0, td);

--- 14 unchanged lines hidden ---

385
386 /*
387 * drop FPU & debug register state storage, or any other
388 * architecture specific resources that
389 * would not be on a new untouched process.
390 */
391 cpu_thread_exit(td); /* XXXSMP */
392
358 PROC_LOCK_ASSERT(p, MA_OWNED);
359 KASSERT(p != NULL, ("thread exiting without a process"));
360 CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
361 (long)p->p_pid, p->p_comm);
362 KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
363
364#ifdef AUDIT
365 AUDIT_SYSCALL_EXIT(0, td);

--- 14 unchanged lines hidden ---

380
381 /*
382 * drop FPU & debug register state storage, or any other
383 * architecture specific resources that
384 * would not be on a new untouched process.
385 */
386 cpu_thread_exit(td); /* XXXSMP */
387
393#ifdef KSE
394 /*
395 * The thread is exiting. scheduler can release its stuff
396 * and collect stats etc.
397 * XXX this is not very right, since PROC_UNLOCK may still
398 * need scheduler stuff.
399 */
400 sched_thread_exit(td);
401#endif
402
403 /* Do the same timestamp bookkeeping that mi_switch() would do. */
404 new_switchtime = cpu_ticks();
405 p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
406 PCPU_SET(switchtime, new_switchtime);
407 PCPU_SET(switchticks, ticks);
408 PCPU_INC(cnt.v_swtch);
388 /* Do the same timestamp bookkeeping that mi_switch() would do. */
389 new_switchtime = cpu_ticks();
390 p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
391 PCPU_SET(switchtime, new_switchtime);
392 PCPU_SET(switchticks, ticks);
393 PCPU_INC(cnt.v_swtch);
409 /*
410 * Aggregate this thread's tick stats in the parent so they are not
411 * lost. Also add the child usage to our own when the final thread
412 * exits.
413 */
414 ruxagg(&p->p_rux, td);
394 /* Add the child usage to our own when the final thread exits. */
415 if (p->p_numthreads == 1)
416 ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
417 /*
418 * The last thread is left attached to the process
419 * so that the whole bundle gets recycled. Skip
420 * all this stuff if we never had threads.
421 * EXIT clears all signs of other threads when
422 * it goes to single threading, so the last thread always
423 * takes the short path.
424 */
425 if (p->p_flag & P_HADTHREADS) {
426 if (p->p_numthreads > 1) {
395 if (p->p_numthreads == 1)
396 ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
397 /*
398 * The last thread is left attached to the process
399 * so that the whole bundle gets recycled. Skip
400 * all this stuff if we never had threads.
401 * EXIT clears all signs of other threads when
402 * it goes to single threading, so the last thread always
403 * takes the short path.
404 */
405 if (p->p_flag & P_HADTHREADS) {
406 if (p->p_numthreads > 1) {
407 thread_lock(td);
408#ifdef KSE
409 kse_unlink(td);
410#else
427 thread_unlink(td);
411 thread_unlink(td);
412#endif
413 thread_unlock(td);
428 /* Impart our resource usage on another thread */
429 td2 = FIRST_THREAD_IN_PROC(p);
430 rucollect(&td2->td_ru, &td->td_ru);
431 sched_exit_thread(td2, td);
432
433 /*
434 * The test below is NOT true if we are the
435 * sole exiting thread. P_STOPPED_SNGL is unset
436 * in exit1() after it is the only survivor.
437 */
438 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
439 if (p->p_numthreads == p->p_suspcount) {
414 /* Impart our resource usage on another thread */
415 td2 = FIRST_THREAD_IN_PROC(p);
416 rucollect(&td2->td_ru, &td->td_ru);
417 sched_exit_thread(td2, td);
418
419 /*
420 * The test below is NOT true if we are the
421 * sole exiting thread. P_STOPPED_SNGL is unset
422 * in exit1() after it is the only survivor.
423 */
424 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
425 if (p->p_numthreads == p->p_suspcount) {
426 thread_lock(p->p_singlethread);
440 thread_unsuspend_one(p->p_singlethread);
427 thread_unsuspend_one(p->p_singlethread);
428 thread_unlock(p->p_singlethread);
441 }
442 }
443
444#ifdef KSE
445 /*
446 * Because each upcall structure has an owner thread,
447 * owner thread exits only when process is in exiting
448 * state, so upcall to userland is no longer needed,
449 * deleting upcall structure is safe here.
450 * So when all threads in a group have exited, all upcalls
451 * in the group should be automatically freed.
452 * XXXKSE This is a KSE thing and should be exported
453 * there somehow.
454 */
455 upcall_remove(td);
456#endif
429 }
430 }
431
432#ifdef KSE
433 /*
434 * Because each upcall structure has an owner thread,
435 * owner thread exits only when process is in exiting
436 * state, so upcall to userland is no longer needed,
437 * deleting upcall structure is safe here.
438 * So when all threads in a group is exited, all upcalls
439 * in the group should be automatically freed.
440 * XXXKSE This is a KSE thing and should be exported
441 * there somehow.
442 */
443 upcall_remove(td);
444#endif
457
458 PROC_UNLOCK(p);
459 PCPU_SET(deadthread, td);
460 } else {
461 /*
462 * The last thread is exiting.. but not through exit()
463 * what should we do?
464 * Theoretically this can't happen
465 * exit1() - clears threading flags before coming here
466 * kse_exit() - treats last thread specially
467 * thr_exit() - treats last thread specially
468 * ifdef KSE
469 * thread_user_enter() - only if more exist
470 * thread_userret() - only if more exist
471 * endif
472 * thread_suspend_check() - only if more exist
473 */
474 panic ("thread_exit: Last thread exiting on its own");
475 }
445 PCPU_SET(deadthread, td);
446 } else {
447 /*
448 * The last thread is exiting.. but not through exit()
449 * what should we do?
450 * Theoretically this can't happen
451 * exit1() - clears threading flags before coming here
452 * kse_exit() - treats last thread specially
453 * thr_exit() - treats last thread specially
454 * ifdef KSE
455 * thread_user_enter() - only if more exist
456 * thread_userret() - only if more exist
457 * endif
458 * thread_suspend_check() - only if more exist
459 */
460 panic ("thread_exit: Last thread exiting on its own");
461 }
476 } else {
477 /*
478 * A non-threaded process comes here.
479 * This includes an ex-threaded process that is coming
480 * here via exit1(). (exit1 dethreads the proc first).
481 */
482 PROC_UNLOCK(p);
483 }
462 }
463 PROC_UNLOCK(p);
464 thread_lock(td);
465 /* Aggregate our tick statistics into our parents rux. */
466 ruxagg(&p->p_rux, td);
467 PROC_SUNLOCK(p);
484 td->td_state = TDS_INACTIVE;
485 CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
468 td->td_state = TDS_INACTIVE;
469 CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
486 cpu_throw(td, choosethread());
470 sched_throw(td);
487 panic("I'm a teapot!");
488 /* NOTREACHED */
489}
490
491/*
492 * Do any thread specific cleanups that may be needed in wait()
493 * called with Giant, proc and schedlock not held.
494 */

--- 32 unchanged lines hidden ---

527 * proc_linkup()
528 * thread_schedule_upcall()
529 * thr_create()
530 */
531void
532thread_link(struct thread *td, struct proc *p)
533{
534
471 panic("I'm a teapot!");
472 /* NOTREACHED */
473}
474
475/*
476 * Do any thread specific cleanups that may be needed in wait()
477 * called with Giant, proc and schedlock not held.
478 */

--- 32 unchanged lines hidden ---

511 * proc_linkup()
512 * thread_schedule_upcall()
513 * thr_create()
514 */
515void
516thread_link(struct thread *td, struct proc *p)
517{
518
519 /*
520 * XXX This can't be enabled because it's called for proc0 before
521 * it's spinlock has been created.
522 * PROC_SLOCK_ASSERT(p, MA_OWNED);
523 */
535 td->td_state = TDS_INACTIVE;
536 td->td_proc = p;
537 td->td_flags = 0;
538
539 LIST_INIT(&td->td_contested);
540 sigqueue_init(&td->td_sigqueue, p);
541 callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
542 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);

--- 31 unchanged lines hidden ---

574 * Called from:
575 * thread_exit()
576 */
577void
578thread_unlink(struct thread *td)
579{
580 struct proc *p = td->td_proc;
581
524 td->td_state = TDS_INACTIVE;
525 td->td_proc = p;
526 td->td_flags = 0;
527
528 LIST_INIT(&td->td_contested);
529 sigqueue_init(&td->td_sigqueue, p);
530 callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
531 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);

--- 31 unchanged lines hidden ---

563 * Called from:
564 * thread_exit()
565 */
566void
567thread_unlink(struct thread *td)
568{
569 struct proc *p = td->td_proc;
570
582 mtx_assert(&sched_lock, MA_OWNED);
571 PROC_SLOCK_ASSERT(p, MA_OWNED);
583 TAILQ_REMOVE(&p->p_threads, td, td_plist);
584 p->p_numthreads--;
585 /* could clear a few other things here */
586 /* Must NOT clear links to proc! */
587}
588
589/*
590 * Enforce single-threading.

--- 35 unchanged lines hidden ---

626 } else {
627 p->p_flag &= ~P_SINGLE_EXIT;
628 if (mode == SINGLE_BOUNDARY)
629 p->p_flag |= P_SINGLE_BOUNDARY;
630 else
631 p->p_flag &= ~P_SINGLE_BOUNDARY;
632 }
633 p->p_flag |= P_STOPPED_SINGLE;
572 TAILQ_REMOVE(&p->p_threads, td, td_plist);
573 p->p_numthreads--;
574 /* could clear a few other things here */
575 /* Must NOT clear links to proc! */
576}
577
578/*
579 * Enforce single-threading.

--- 35 unchanged lines hidden ---

615 } else {
616 p->p_flag &= ~P_SINGLE_EXIT;
617 if (mode == SINGLE_BOUNDARY)
618 p->p_flag |= P_SINGLE_BOUNDARY;
619 else
620 p->p_flag &= ~P_SINGLE_BOUNDARY;
621 }
622 p->p_flag |= P_STOPPED_SINGLE;
634 mtx_lock_spin(&sched_lock);
623 PROC_SLOCK(p);
635 p->p_singlethread = td;
636 if (mode == SINGLE_EXIT)
637 remaining = p->p_numthreads;
638 else if (mode == SINGLE_BOUNDARY)
639 remaining = p->p_numthreads - p->p_boundary_count;
640 else
641 remaining = p->p_numthreads - p->p_suspcount;
642 while (remaining != 1) {
643 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
644 goto stopme;
645 FOREACH_THREAD_IN_PROC(p, td2) {
646 if (td2 == td)
647 continue;
624 p->p_singlethread = td;
625 if (mode == SINGLE_EXIT)
626 remaining = p->p_numthreads;
627 else if (mode == SINGLE_BOUNDARY)
628 remaining = p->p_numthreads - p->p_boundary_count;
629 else
630 remaining = p->p_numthreads - p->p_suspcount;
631 while (remaining != 1) {
632 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
633 goto stopme;
634 FOREACH_THREAD_IN_PROC(p, td2) {
635 if (td2 == td)
636 continue;
637 thread_lock(td2);
648 td2->td_flags |= TDF_ASTPENDING;
649 if (TD_IS_INHIBITED(td2)) {
650 switch (mode) {
651 case SINGLE_EXIT:
652 if (td->td_flags & TDF_DBSUSPEND)
653 td->td_flags &= ~TDF_DBSUSPEND;
654 if (TD_IS_SUSPENDED(td2))
655 thread_unsuspend_one(td2);

--- 5 unchanged lines hidden ---

661 if (TD_IS_SUSPENDED(td2) &&
662 !(td2->td_flags & TDF_BOUNDARY))
663 thread_unsuspend_one(td2);
664 if (TD_ON_SLEEPQ(td2) &&
665 (td2->td_flags & TDF_SINTR))
666 sleepq_abort(td2, ERESTART);
667 break;
668 default:
638 td2->td_flags |= TDF_ASTPENDING;
639 if (TD_IS_INHIBITED(td2)) {
640 switch (mode) {
641 case SINGLE_EXIT:
642 if (td->td_flags & TDF_DBSUSPEND)
643 td->td_flags &= ~TDF_DBSUSPEND;
644 if (TD_IS_SUSPENDED(td2))
645 thread_unsuspend_one(td2);

--- 5 unchanged lines hidden (view full) ---

651 if (TD_IS_SUSPENDED(td2) &&
652 !(td2->td_flags & TDF_BOUNDARY))
653 thread_unsuspend_one(td2);
654 if (TD_ON_SLEEPQ(td2) &&
655 (td2->td_flags & TDF_SINTR))
656 sleepq_abort(td2, ERESTART);
657 break;
658 default:
669 if (TD_IS_SUSPENDED(td2))
659 if (TD_IS_SUSPENDED(td2)) {
660 thread_unlock(td2);
670 continue;
661 continue;
662 }
671 /*
672 * maybe other inhibited states too?
673 */
674 if ((td2->td_flags & TDF_SINTR) &&
675 (td2->td_inhibitors &
676 (TDI_SLEEPING | TDI_SWAPPED)))
677 thread_suspend_one(td2);
678 break;
679 }
680 }
681#ifdef SMP
682 else if (TD_IS_RUNNING(td2) && td != td2) {
683 forward_signal(td2);
684 }
685#endif
663 /*
664 * maybe other inhibited states too?
665 */
666 if ((td2->td_flags & TDF_SINTR) &&
667 (td2->td_inhibitors &
668 (TDI_SLEEPING | TDI_SWAPPED)))
669 thread_suspend_one(td2);
670 break;
671 }
672 }
673#ifdef SMP
674 else if (TD_IS_RUNNING(td2) && td != td2) {
675 forward_signal(td2);
676 }
677#endif
678 thread_unlock(td2);
686 }
687 if (mode == SINGLE_EXIT)
688 remaining = p->p_numthreads;
689 else if (mode == SINGLE_BOUNDARY)
690 remaining = p->p_numthreads - p->p_boundary_count;
691 else
692 remaining = p->p_numthreads - p->p_suspcount;
693
694 /*
695 * Maybe we suspended some threads.. was it enough?
696 */
697 if (remaining == 1)
698 break;
699
700stopme:
701 /*
702 * Wake us up when everyone else has suspended.
703 * In the mean time we suspend as well.
704 */
679 }
680 if (mode == SINGLE_EXIT)
681 remaining = p->p_numthreads;
682 else if (mode == SINGLE_BOUNDARY)
683 remaining = p->p_numthreads - p->p_boundary_count;
684 else
685 remaining = p->p_numthreads - p->p_suspcount;
686
687 /*
688 * Maybe we suspended some threads.. was it enough?
689 */
690 if (remaining == 1)
691 break;
692
693stopme:
694 /*
695 * Wake us up when everyone else has suspended.
696 * In the mean time we suspend as well.
697 */
705 thread_stopped(p);
706 thread_suspend_one(td);
707 PROC_UNLOCK(p);
708 mi_switch(SW_VOL, NULL);
709 mtx_unlock_spin(&sched_lock);
710 PROC_LOCK(p);
711 mtx_lock_spin(&sched_lock);
698 thread_suspend_switch(td);
712 if (mode == SINGLE_EXIT)
713 remaining = p->p_numthreads;
714 else if (mode == SINGLE_BOUNDARY)
715 remaining = p->p_numthreads - p->p_boundary_count;
716 else
717 remaining = p->p_numthreads - p->p_suspcount;
718 }
719 if (mode == SINGLE_EXIT) {
720 /*
721 * We have gotten rid of all the other threads and we
722 * are about to either exit or exec. In either case,
723 * we try our utmost to revert to being a non-threaded
724 * process.
725 */
726 p->p_singlethread = NULL;
727 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
728 thread_unthread(td);
729 }
699 if (mode == SINGLE_EXIT)
700 remaining = p->p_numthreads;
701 else if (mode == SINGLE_BOUNDARY)
702 remaining = p->p_numthreads - p->p_boundary_count;
703 else
704 remaining = p->p_numthreads - p->p_suspcount;
705 }
706 if (mode == SINGLE_EXIT) {
707 /*
708 * We have gotten rid of all the other threads and we
709 * are about to either exit or exec. In either case,
710 * we try our utmost to revert to being a non-threaded
711 * process.
712 */
713 p->p_singlethread = NULL;
714 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
715 thread_unthread(td);
716 }
730 mtx_unlock_spin(&sched_lock);
717 PROC_SUNLOCK(p);
731 return (0);
732}
733
734/*
735 * Called in from locations that can safely check to see
736 * whether we have to suspend or at least throttle for a
737 * single-thread event (e.g. fork).
738 *

--- 56 unchanged lines hidden ---

795 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
796 (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
797 return (ERESTART);
798
799 /* If thread will exit, flush its pending signals */
800 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
801 sigqueue_flush(&td->td_sigqueue);
802
718 return (0);
719}
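For orientation, a hedged sketch of an execve()-style caller of the single-threading machinery above (the real call sites, their error handling, and the exact prototype live outside this diff):

        PROC_LOCK(p);
        if (thread_single(SINGLE_BOUNDARY) != 0) {
                /* stopped or interrupted before becoming single threaded */
        }
        /* only the calling thread runs in the process at this point */
        thread_single_end();
        PROC_UNLOCK(p);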
720
721/*
722 * Called in from locations that can safely check to see
723 * whether we have to suspend or at least throttle for a
724 * single-thread event (e.g. fork).
725 *

--- 56 unchanged lines hidden ---

782 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
783 (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
784 return (ERESTART);
785
786 /* If thread will exit, flush its pending signals */
787 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
788 sigqueue_flush(&td->td_sigqueue);
789
803 mtx_lock_spin(&sched_lock);
790 PROC_SLOCK(p);
804 thread_stopped(p);
805 /*
806 * If the process is waiting for us to exit,
807 * this thread should just suicide.
808 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
809 */
810 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
811 thread_exit();
791 thread_stopped(p);
792 /*
793 * If the process is waiting for us to exit,
794 * this thread should just suicide.
795 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
796 */
797 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
798 thread_exit();
812
799 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
800 if (p->p_numthreads == p->p_suspcount + 1) {
801 thread_lock(p->p_singlethread);
802 thread_unsuspend_one(p->p_singlethread);
803 thread_unlock(p->p_singlethread);
804 }
805 }
806 PROC_UNLOCK(p);
807 thread_lock(td);
813 /*
814 * When a thread suspends, it just
815 * gets taken off all queues.
816 */
817 thread_suspend_one(td);
818 if (return_instead == 0) {
819 p->p_boundary_count++;
820 td->td_flags |= TDF_BOUNDARY;
821 }
808 /*
809 * When a thread suspends, it just
810 * gets taken off all queues.
811 */
812 thread_suspend_one(td);
813 if (return_instead == 0) {
814 p->p_boundary_count++;
815 td->td_flags |= TDF_BOUNDARY;
816 }
822 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
823 if (p->p_numthreads == p->p_suspcount)
824 thread_unsuspend_one(p->p_singlethread);
825 }
826 PROC_UNLOCK(p);
817 PROC_SUNLOCK(p);
827 mi_switch(SW_INVOL, NULL);
818 mi_switch(SW_INVOL, NULL);
828 if (return_instead == 0) {
829 p->p_boundary_count--;
819 if (return_instead == 0)
830 td->td_flags &= ~TDF_BOUNDARY;
820 td->td_flags &= ~TDF_BOUNDARY;
831 }
832 mtx_unlock_spin(&sched_lock);
821 thread_unlock(td);
833 PROC_LOCK(p);
822 PROC_LOCK(p);
823 if (return_instead == 0)
824 p->p_boundary_count--;
834 }
835 return (0);
836}
837
838void
825 }
826 return (0);
827}
828
829void
830thread_suspend_switch(struct thread *td)
831{
832 struct proc *p;
833
834 p = td->td_proc;
835 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
836 PROC_LOCK_ASSERT(p, MA_OWNED);
837 PROC_SLOCK_ASSERT(p, MA_OWNED);
838 /*
839 * We implement thread_suspend_one in stages here to avoid
840 * dropping the proc lock while the thread lock is owned.
841 */
842 thread_stopped(p);
843 p->p_suspcount++;
844 PROC_UNLOCK(p);
845 thread_lock(td);
846 TD_SET_SUSPENDED(td);
847 PROC_SUNLOCK(p);
848 DROP_GIANT();
849 mi_switch(SW_VOL, NULL);
850 thread_unlock(td);
851 PICKUP_GIANT();
852 PROC_LOCK(p);
853 PROC_SLOCK(p);
854}
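thread_suspend_switch() above is deliberately staged so the switch never happens while the proc lock is held together with a spin lock. Consistent with its entry assertions and its tail, a caller (in this file, the single-threading loop) looks roughly like:

        PROC_LOCK(p);
        PROC_SLOCK(p);
        /* ... decide that the current thread must stop ... */
        thread_suspend_switch(td);      /* td is curthread; switches away and
                                           returns with the proc lock and proc
                                           spin lock held again */
        PROC_SUNLOCK(p);
        PROC_UNLOCK(p);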
855
856void
839thread_suspend_one(struct thread *td)
840{
841 struct proc *p = td->td_proc;
842
857thread_suspend_one(struct thread *td)
858{
859 struct proc *p = td->td_proc;
860
843 mtx_assert(&sched_lock, MA_OWNED);
844 PROC_LOCK_ASSERT(p, MA_OWNED);
861 PROC_SLOCK_ASSERT(p, MA_OWNED);
862 THREAD_LOCK_ASSERT(td, MA_OWNED);
845 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
846 p->p_suspcount++;
847 TD_SET_SUSPENDED(td);
848}
849
850void
851thread_unsuspend_one(struct thread *td)
852{
853 struct proc *p = td->td_proc;
854
863 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
864 p->p_suspcount++;
865 TD_SET_SUSPENDED(td);
866}
867
868void
869thread_unsuspend_one(struct thread *td)
870{
871 struct proc *p = td->td_proc;
872
855 mtx_assert(&sched_lock, MA_OWNED);
856 PROC_LOCK_ASSERT(p, MA_OWNED);
873 PROC_SLOCK_ASSERT(p, MA_OWNED);
874 THREAD_LOCK_ASSERT(td, MA_OWNED);
857 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
858 TD_CLR_SUSPENDED(td);
859 p->p_suspcount--;
860 setrunnable(td);
861}
862
863/*
864 * Allow all threads blocked by single threading to continue running.
865 */
866void
867thread_unsuspend(struct proc *p)
868{
869 struct thread *td;
870
875 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
876 TD_CLR_SUSPENDED(td);
877 p->p_suspcount--;
878 setrunnable(td);
879}
880
881/*
882 * Allow all threads blocked by single threading to continue running.
883 */
884void
885thread_unsuspend(struct proc *p)
886{
887 struct thread *td;
888
871 mtx_assert(&sched_lock, MA_OWNED);
872 PROC_LOCK_ASSERT(p, MA_OWNED);
889 PROC_LOCK_ASSERT(p, MA_OWNED);
890 PROC_SLOCK_ASSERT(p, MA_OWNED);
873 if (!P_SHOULDSTOP(p)) {
874 FOREACH_THREAD_IN_PROC(p, td) {
891 if (!P_SHOULDSTOP(p)) {
892 FOREACH_THREAD_IN_PROC(p, td) {
893 thread_lock(td);
875 if (TD_IS_SUSPENDED(td)) {
876 thread_unsuspend_one(td);
877 }
894 if (TD_IS_SUSPENDED(td)) {
895 thread_unsuspend_one(td);
896 }
897 thread_unlock(td);
878 }
879 } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
880 (p->p_numthreads == p->p_suspcount)) {
881 /*
882 * Stopping everything also did the job for the single
883 * threading request. Now we've downgraded to single-threaded,
884 * let it continue.
885 */
898 }
899 } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
900 (p->p_numthreads == p->p_suspcount)) {
901 /*
902 * Stopping everything also did the job for the single
903 * threading request. Now we've downgraded to single-threaded,
904 * let it continue.
905 */
906 thread_lock(p->p_singlethread);
886 thread_unsuspend_one(p->p_singlethread);
907 thread_unsuspend_one(p->p_singlethread);
908 thread_unlock(p->p_singlethread);
887 }
888}
889
890/*
891 * End the single threading mode..
892 */
893void
894thread_single_end(void)
895{
896 struct thread *td;
897 struct proc *p;
898
899 td = curthread;
900 p = td->td_proc;
901 PROC_LOCK_ASSERT(p, MA_OWNED);
902 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
909 }
910}
911
912/*
913 * End the single threading mode..
914 */
915void
916thread_single_end(void)
917{
918 struct thread *td;
919 struct proc *p;
920
921 td = curthread;
922 p = td->td_proc;
923 PROC_LOCK_ASSERT(p, MA_OWNED);
924 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
903 mtx_lock_spin(&sched_lock);
925 PROC_SLOCK(p);
904 p->p_singlethread = NULL;
905 /*
906 * If there are other threads they may now run,
907 * unless of course there is a blanket 'stop order'
908 * on the process. The single threader must be allowed
909 * to continue however as this is a bad place to stop.
910 */
911 if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
912 FOREACH_THREAD_IN_PROC(p, td) {
926 p->p_singlethread = NULL;
927 /*
928 * If there are other threads they may now run,
929 * unless of course there is a blanket 'stop order'
930 * on the process. The single threader must be allowed
931 * to continue however as this is a bad place to stop.
932 */
933 if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
934 FOREACH_THREAD_IN_PROC(p, td) {
935 thread_lock(td);
913 if (TD_IS_SUSPENDED(td)) {
914 thread_unsuspend_one(td);
915 }
936 if (TD_IS_SUSPENDED(td)) {
937 thread_unsuspend_one(td);
938 }
939 thread_unlock(td);
916 }
917 }
940 }
941 }
918 mtx_unlock_spin(&sched_lock);
942 PROC_SUNLOCK(p);
919}
920
921struct thread *
922thread_find(struct proc *p, lwpid_t tid)
923{
924 struct thread *td;
925
926 PROC_LOCK_ASSERT(p, MA_OWNED);
943}
944
945struct thread *
946thread_find(struct proc *p, lwpid_t tid)
947{
948 struct thread *td;
949
950 PROC_LOCK_ASSERT(p, MA_OWNED);
927 mtx_lock_spin(&sched_lock);
951 PROC_SLOCK(p);
928 FOREACH_THREAD_IN_PROC(p, td) {
929 if (td->td_tid == tid)
930 break;
931 }
952 FOREACH_THREAD_IN_PROC(p, td) {
953 if (td->td_tid == tid)
954 break;
955 }
932 mtx_unlock_spin(&sched_lock);
956 PROC_SUNLOCK(p);
933 return (td);
934}
957 return (td);
958}
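Finally, a small usage sketch for thread_find() (hypothetical caller; a tid like this normally arrives as a system call argument):

        struct thread *ttd;

        PROC_LOCK(p);
        ttd = thread_find(p, tid);
        if (ttd != NULL) {
                /* ttd stays linked to p while the proc lock is held */
        }
        PROC_UNLOCK(p);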