Lines matching refs: td

132 static int thread_unsuspend_one(struct thread *td, struct proc *p,
194 struct thread *td;
196 td = (struct thread *)mem;
197 td->td_state = TDS_INACTIVE;
198 td->td_oncpu = NOCPU;
200 td->td_tid = tid_alloc();
207 td->td_critnest = 1;
208 td->td_lend_user_pri = PRI_MAX;
209 EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
211 audit_thread_alloc(td);
213 umtx_thread_alloc(td);
223 struct thread *td;
225 td = (struct thread *)mem;
229 switch (td->td_state) {
248 audit_thread_free(td);
251 osd_thread_exit(td);
252 td_softdep_cleanup(td);
253 MPASS(td->td_su == NULL);
255 EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
256 tid_free(td->td_tid);
265 struct thread *td;
267 td = (struct thread *)mem;
269 td->td_sleepqueue = sleepq_alloc();
270 td->td_turnstile = turnstile_alloc();
271 td->td_rlqe = NULL;
272 EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
273 umtx_thread_init(td);
274 td->td_kstack = 0;
275 td->td_sel = NULL;
285 struct thread *td;
287 td = (struct thread *)mem;
288 EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
289 rlqentry_free(td->td_rlqe);
290 turnstile_free(td->td_turnstile);
291 sleepq_free(td->td_sleepqueue);
292 umtx_thread_fini(td);
293 seltdfini(td);
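
The four functions matched above (thread_ctor, thread_dtor, thread_init, thread_fini) are UMA zone callbacks for the per-thread zone. A minimal sketch of how such callbacks get registered, assuming the stock uma_zcreate() interface; the zone name, item size, alignment and flags below are illustrative rather than copied from the file:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/uma.h>

static uma_zone_t example_thread_zone;

/* Sketch of a zone-setup routine wiring in the callbacks listed above. */
static void
example_threadinit(void)
{
	example_thread_zone = uma_zcreate("THREAD", sizeof(struct thread),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
}
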
305 proc_linkup0(struct proc *p, struct thread *td)
308 proc_linkup(p, td);
312 proc_linkup(struct proc *p, struct thread *td)
323 thread_link(td, p);
353 thread_zombie(struct thread *td)
356 TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
364 thread_stash(struct thread *td)
366 atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
367 thread_zombie(td);
403 struct thread *td;
407 td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
408 KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
409 if (!vm_thread_new(td, pages)) {
410 uma_zfree(thread_zone, td);
413 cpu_thread_alloc(td);
414 vm_domain_policy_init(&td->td_vm_dom_policy);
415 return (td);
419 thread_alloc_stack(struct thread *td, int pages)
422 KASSERT(td->td_kstack == 0,
424 if (!vm_thread_new(td, pages))
426 cpu_thread_alloc(td);
434 thread_free(struct thread *td)
437 lock_profile_thread_exit(td);
438 if (td->td_cpuset)
439 cpuset_rel(td->td_cpuset);
440 td->td_cpuset = NULL;
441 cpu_thread_free(td);
442 if (td->td_kstack != 0)
443 vm_thread_dispose(td);
444 vm_domain_policy_cleanup(&td->td_vm_dom_policy);
445 callout_drain(&td->td_slpcallout);
446 uma_zfree(thread_zone, td);
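
thread_alloc() hands back a thread with a kernel stack and machine-dependent state set up; thread_free() is its counterpart. A hedged sketch of a typical caller, loosely modeled on the thr_new()/fork paths; the function name is made up, and error handling and scheduler setup are abbreviated:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

static int
example_create_thread(struct thread *td)
{
	struct thread *newtd;

	newtd = thread_alloc(0);	/* 0 selects the default kstack size */
	if (newtd == NULL)
		return (ENOMEM);
	/* Share the creator's credentials and resource limits (see below). */
	thread_cow_get(newtd, td);
	/*
	 * ... cpu_copy_thread(), thread_link(), scheduler setup ...
	 * If a later step fails, the pairing is reversed:
	 * thread_cow_free(newtd); thread_free(newtd);
	 */
	return (0);
}
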
460 thread_cow_get(struct thread *newtd, struct thread *td)
463 newtd->td_ucred = crhold(td->td_ucred);
464 newtd->td_limit = lim_hold(td->td_limit);
465 newtd->td_cowgen = td->td_cowgen;
469 thread_cow_free(struct thread *td)
472 if (td->td_ucred != NULL)
473 crfree(td->td_ucred);
474 if (td->td_limit != NULL)
475 lim_free(td->td_limit);
479 thread_cow_update(struct thread *td)
485 p = td->td_proc;
489 if (td->td_ucred != p->p_ucred) {
490 oldcred = td->td_ucred;
491 td->td_ucred = crhold(p->p_ucred);
493 if (td->td_limit != p->p_limit) {
494 oldlimit = td->td_limit;
495 td->td_limit = lim_hold(p->p_limit);
497 td->td_cowgen = p->p_cowgen;
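
thread_cow_get(), thread_cow_free() and thread_cow_update() keep per-thread copy-on-write references to the process ucred and plimit, keyed by the p_cowgen generation counter. A short sketch of the consumer side, modeled on the generation check done before returning to user mode; the function name is illustrative:

#include <sys/param.h>
#include <sys/proc.h>

static void
example_cow_check(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	/* Refresh stale per-thread ucred/limit copies lazily. */
	if (td->td_cowgen != p->p_cowgen)
		thread_cow_update(td);
}
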
518 struct thread *td;
523 td = curthread;
524 p = td->td_proc;
531 CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
532 (long)p->p_pid, td->td_name);
534 KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
541 cpu_thread_exit(td);
553 atomic_add_int(&td->td_proc->p_exitthreads, 1);
554 thread_unlink(td);
556 sched_exit_thread(td2, td);
574 PCPU_SET(deadthread, td);
587 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
588 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
592 thread_lock(td);
598 td->td_runtime += runtime;
599 td->td_incruntime += runtime;
605 td->td_ru.ru_nvcsw++;
606 ruxagg(p, td);
607 rucollect(&p->p_ru, &td->td_ru);
610 td->td_state = TDS_INACTIVE;
612 witness_thread_exit(td);
614 CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
615 sched_throw(td);
627 struct thread *td;
632 td = FIRST_THREAD_IN_PROC(p);
634 thread_lock(td);
635 thread_unlock(td);
636 lock_profile_thread_exit(td);
637 cpuset_rel(td->td_cpuset);
638 td->td_cpuset = NULL;
639 cpu_thread_clean(td);
640 thread_cow_free(td);
641 callout_drain(&td->td_slpcallout);
651 thread_link(struct thread *td, struct proc *p)
659 td->td_state = TDS_INACTIVE;
660 td->td_proc = p;
661 td->td_flags = TDF_INMEM;
663 LIST_INIT(&td->td_contested);
664 LIST_INIT(&td->td_lprof[0]);
665 LIST_INIT(&td->td_lprof[1]);
666 sigqueue_init(&td->td_sigqueue, p);
667 callout_init(&td->td_slpcallout, 1);
668 TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
677 thread_unlink(struct thread *td)
679 struct proc *p = td->td_proc;
682 TAILQ_REMOVE(&p->p_threads, td, td_plist);
778 struct thread *td;
782 td = curthread;
789 * adjusted to also account for td->td_proc != p. For now
792 KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
793 (mode != SINGLE_ALLPROC && td->td_proc == p),
794 ("mode %d proc %p curproc %p", mode, p, td->td_proc));
802 if (p->p_singlethread != NULL && p->p_singlethread != td)
819 p->p_singlethread = td;
826 if (td2 == td)
833 } else if (TD_IS_RUNNING(td2) && td != td2) {
854 thread_suspend_switch(td, p);
873 sched_relinquish(td);
891 if (td2 == td)
895 ("td %p not on boundary", td2));
897 ("td %p is not suspended", td2));
909 struct thread *td;
911 td = curthread;
912 p = td->td_proc;
915 (td->td_dbgflags & TDB_SUSPEND) != 0));
954 struct thread *td;
958 td = curthread;
959 p = td->td_proc;
972 if (p->p_singlethread == td)
986 if ((td->td_flags & TDF_SBDRY) != 0) {
989 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
992 return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1000 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1008 (p->p_sysent->sv_thread_detach)(td);
1009 umtx_thread_exit(td);
1010 kern_thr_exit(td);
1027 thread_lock(td);
1032 thread_suspend_one(td);
1035 td->td_flags |= TDF_BOUNDARY;
1039 thread_unlock(td);
1046 thread_suspend_switch(struct thread *td, struct proc *p)
1049 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1056 if (p == td->td_proc) {
1061 thread_lock(td);
1062 td->td_flags &= ~TDF_NEEDSUSPCHK;
1063 TD_SET_SUSPENDED(td);
1064 sched_sleep(td, 0);
1068 thread_unlock(td);
1075 thread_suspend_one(struct thread *td)
1079 p = td->td_proc;
1081 THREAD_LOCK_ASSERT(td, MA_OWNED);
1082 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1084 td->td_flags &= ~TDF_NEEDSUSPCHK;
1085 TD_SET_SUSPENDED(td);
1086 sched_sleep(td, 0);
1090 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
1093 THREAD_LOCK_ASSERT(td, MA_OWNED);
1094 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1095 TD_CLR_SUSPENDED(td);
1096 td->td_flags &= ~TDF_ALLPROCSUSP;
1097 if (td->td_proc == p) {
1100 if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
1101 td->td_flags &= ~TDF_BOUNDARY;
1105 return (setrunnable(td));
1114 struct thread *td;
1121 FOREACH_THREAD_IN_PROC(p, td) {
1122 thread_lock(td);
1123 if (TD_IS_SUSPENDED(td)) {
1124 wakeup_swapper |= thread_unsuspend_one(td, p,
1127 thread_unlock(td);
1153 struct thread *td;
1181 FOREACH_THREAD_IN_PROC(p, td) {
1182 thread_lock(td);
1183 if (TD_IS_SUSPENDED(td)) {
1184 wakeup_swapper |= thread_unsuspend_one(td, p,
1187 thread_unlock(td);
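
thread_unsuspend_one() returns the result of setrunnable(), which is nonzero when a formerly swapped-out thread becomes runnable and the swapper (proc0) must be poked. The loops matched above OR those results into a wakeup_swapper flag; a hedged sketch of the whole pattern, including the kick_proc0() call that does not mention td and is therefore absent from this listing. The proc spinlock handling of the real functions is omitted:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

static void
example_unsuspend_all(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (TD_IS_SUSPENDED(td))
			wakeup_swapper |= thread_unsuspend_one(td, p, true);
		thread_unlock(td);
	}
	if (wakeup_swapper)
		kick_proc0();
}
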
1200 struct thread *td;
1203 FOREACH_THREAD_IN_PROC(p, td) {
1204 if (td->td_tid == tid)
1207 return (td);
1215 struct thread *td;
1219 LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1220 if (td->td_tid == tid) {
1221 if (pid != -1 && td->td_proc->p_pid != pid) {
1222 td = NULL;
1225 PROC_LOCK(td->td_proc);
1226 if (td->td_proc->p_state == PRS_NEW) {
1227 PROC_UNLOCK(td->td_proc);
1228 td = NULL;
1233 LIST_REMOVE(td, td_hash);
1234 LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1235 td, td_hash);
1237 return (td);
1245 return (td);
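
tdfind() looks a thread up in the global tid hash, optionally restricted to one pid (-1 means any process), and returns it with the containing process locked, so the caller must drop that lock. A small hedged sketch; the wrapper name is made up for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

static int
example_tid_to_pid(lwpid_t tid, pid_t *pidp)
{
	struct thread *ttd;

	ttd = tdfind(tid, -1);		/* -1: match any process */
	if (ttd == NULL)
		return (ESRCH);
	*pidp = ttd->td_proc->p_pid;
	PROC_UNLOCK(ttd->td_proc);	/* tdfind() returned it locked */
	return (0);
}
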
1249 tidhash_add(struct thread *td)
1252 LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1257 tidhash_remove(struct thread *td)
1260 LIST_REMOVE(td, td_hash);