/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 115084 2003-05-16 21:26:42Z marcel $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
    &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 150;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 50;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
    &max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "");

static int virtual_cpu;

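/*
 * RANGEOF() yields the number of bytes spanned by the members of "type"
 * from "start" up to (but not including) "end".  It is used with bzero()
 * and bcopy() to clear or copy contiguous sub-ranges of a structure.
 */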
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

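/*
 * Structures released by exiting threads are parked on these zombie
 * queues under kse_zombie_lock and reclaimed later by thread_reap(),
 * since an exiting thread cannot free its own stack and structures.
 */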
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
    TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

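/*
 * Sysctl handler for kern.threads.virtual_cpu.  While the knob is unset
 * it reports the detected cpu count; it accepts any non-negative value,
 * and setting it back to zero reverts to the real cpu count.
 */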
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;
        int def_val;

#ifdef SMP
        def_val = mp_ncpus;
#else
        def_val = 1;
#endif
        if (virtual_cpu == 0)
                new_val = def_val;
        else
                new_val = virtual_cpu;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < 0)
                return (EINVAL);
        virtual_cpu = new_val;
        return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_oncpu = NOCPU;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        mtx_lock(&Giant);
        pmap_new_thread(td, 0);
        mtx_unlock(&Giant);
        cpu_thread_setup(td);
        td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
        struct kse *ke;

        ke = (struct kse *)mem;
        ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
        struct ksegrp *kg;

        kg = (struct ksegrp *)mem;
        kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * KSE is linked into kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
        struct proc *p = kg->kg_proc;

        TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
        kg->kg_kses++;
        ke->ke_state = KES_UNQUEUED;
        ke->ke_proc = p;
        ke->ke_ksegrp = kg;
        ke->ke_thread = NULL;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = ke->ke_ksegrp;
        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
        if (ke->ke_state == KES_IDLE) {
                TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                kg->kg_idle_kses--;
        }
        if (--kg->kg_kses == 0)
                ksegrp_unlink(kg);
        /*
         * Aggregate stats from the KSE
         */
        kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

        TAILQ_INIT(&kg->kg_threads);
        TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_slpq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_kseq);       /* all kses in ksegrp */
        TAILQ_INIT(&kg->kg_iq);         /* all idle kses in ksegrp */
        TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structures in ksegrp */
        kg->kg_proc = p;
        /*
         * The following counters are in the -zero- section
         * and may not need clearing.
         */
        kg->kg_numthreads = 0;
        kg->kg_runnable = 0;
        kg->kg_kses = 0;
        kg->kg_runq_kses = 0; /* XXXKSE change name */
        kg->kg_idle_kses = 0;
        kg->kg_numupcalls = 0;
        /* Link it in now that it's consistent. */
        p->p_numksegrps++;
        TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
        struct proc *p;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
        KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
        KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

        p = kg->kg_proc;
        TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
        p->p_numksegrps--;
        /*
         * Aggregate stats from the KSE
         */
        ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
        struct kse_upcall *ku;

        ku = uma_zalloc(upcall_zone, M_WAITOK);
        bzero(ku, sizeof(*ku));
        return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

        uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
        ku->ku_ksegrp = kg;
        kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
        struct ksegrp *kg = ku->ku_ksegrp;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
        TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
        kg->kg_numupcalls--;
        upcall_stash(ku);
}

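/*
 * Detach and unlink the upcall owned by "td", if any.  The owner
 * pointer is cleared first because upcall_unlink() asserts that the
 * upcall is already ownerless.
 */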
void
upcall_remove(struct thread *td)
{

        if (td->td_upcall) {
                td->td_upcall->ku_owner = NULL;
                upcall_unlink(td->td_upcall);
                td->td_upcall = 0;
        }
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
            struct kse *ke, struct thread *td)
{

        TAILQ_INIT(&p->p_ksegrps);      /* all ksegrps in proc */
        TAILQ_INIT(&p->p_threads);      /* all threads in proc */
        TAILQ_INIT(&p->p_suspended);    /* Threads suspended */
        p->p_numksegrps = 0;
        p->p_numthreads = 0;

        ksegrp_link(kg, p);
        kse_link(ke, kg);
        thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
        struct kse_thr_mailbox * tmbx;
};
*/
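/*
 * Find the thread whose mailbox is "tmbx", flag it as interrupted,
 * and abort any interruptible sleep it is in so that it returns to
 * userland promptly with the interrupt flag visible.
 */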
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
        struct proc *p;
        struct thread *td2;

        p = td->td_proc;
        if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
                return (EINVAL);
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td2) {
                if (td2->td_mailbox == uap->tmbx) {
                        td2->td_flags |= TDF_INTERRUPT;
                        if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
                                if (td2->td_flags & TDF_CVWAITQ)
                                        cv_abort(td2);
                                else
                                        abortsleep(td2);
                        }
                        mtx_unlock_spin(&sched_lock);
                        return (0);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (ESRCH);
}

/*
struct kse_exit_args {
        register_t dummy;
};
*/
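/*
 * A thread in KSE mode exits.  The last thread in the process takes
 * the process out of KSE mode; any other thread simply exits, first
 * tearing its group down if it is the group's last thread.
 */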
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse *ke;

        p = td->td_proc;
        if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
                return (EINVAL);
        kg = td->td_ksegrp;
        /* Serialize removing upcall */
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (EDEADLK);
        }
        ke = td->td_kse;
        upcall_remove(td);
        if (p->p_numthreads == 1) {
                kse_purge(p, td);
                p->p_flag &= ~P_THREADED;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
        } else {
                if (kg->kg_numthreads == 1) { /* Shutdown a group */
                        kse_purge_group(td);
                        ke->ke_flags |= KEF_EXIT;
                }
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
        struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct timespec ts, ts2, ts3, timeout;
        struct timeval tv;
        int error;

        p = td->td_proc;
        kg = td->td_ksegrp;
        if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
                return (EINVAL);
        if (uap->timeout != NULL) {
                if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
                        return (error);
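                /*
                 * Convert the relative timeout to an absolute deadline
                 * on the uptime clock ("ts"); "tv" holds the interval
                 * from which the msleep() timeout below is derived.
                 */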
                getnanouptime(&ts);
                timespecadd(&ts, &timeout);
                TIMESPEC_TO_TIMEVAL(&tv, &timeout);
        }
        mtx_lock_spin(&sched_lock);
        /* Change OURSELF to become an upcall. */
        td->td_flags = TDF_UPCALLING;
#if 0 /* XXX This shouldn't be necessary */
        if (p->p_sflag & PS_NEEDSIGCHK)
                td->td_flags |= TDF_ASTPENDING;
#endif
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
        while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
               (kg->kg_completed == NULL)) {
                kg->kg_upsleeps++;
                error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
                    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
                kg->kg_upsleeps--;
                PROC_UNLOCK(p);
                if (uap->timeout == NULL || error != EWOULDBLOCK)
                        return (0);
                getnanouptime(&ts2);
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
                PROC_LOCK(p);
        }
        PROC_UNLOCK(p);
        return (0);
}

/* struct kse_wakeup_args {
        struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse_upcall *ku;
        struct thread *td2;

        p = td->td_proc;
        td2 = NULL;
        ku = NULL;
        /* KSE-enabled processes only, please. */
        if (!(p->p_flag & P_THREADED))
                return (EINVAL);
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
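        /*
         * If a mailbox was supplied, search every group for the upcall
         * that owns it.  Otherwise use the caller's own group: wake a
         * sleeper in kse_release() if there is one, else pick the
         * first upcall in the group.
         */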
        if (uap->mbx) {
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        FOREACH_UPCALL_IN_GROUP(kg, ku) {
                                if (ku->ku_mailbox == uap->mbx)
                                        break;
                        }
                        if (ku)
                                break;
                }
        } else {
                kg = td->td_ksegrp;
                if (kg->kg_upsleeps) {
                        wakeup_one(&kg->kg_completed);
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        return (0);
                }
                ku = TAILQ_FIRST(&kg->kg_upcalls);
        }
        if (ku) {
                if ((td2 = ku->ku_owner) == NULL) {
                        panic("%s: no owner", __func__);
                } else if (TD_ON_SLEEPQ(td2) &&
                           (td2->td_wchan == &kg->kg_completed)) {
                        abortsleep(td2);
                } else {
                        ku->ku_flags |= KUF_DOUPCALL;
                }
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (0);
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (ESRCH);
}

/*
 * First call with no new KSEG requested: use the current KSE and do
 * not schedule an upcall.  In all other situations, allocate as many
 * new KSEs as needed and schedule an upcall.
 */
/* struct kse_create_args {
        struct kse_mailbox *mbx;
        int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
        struct kse *newke;
        struct ksegrp *newkg;
        struct ksegrp *kg;
        struct proc *p;
        struct kse_mailbox mbx;
        struct kse_upcall *newku;
        int err, ncpus;

        p = td->td_proc;
        if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
                return (err);

        /* Too bad the kernel doesn't always have a cpu count available. */
#ifdef SMP
        ncpus = mp_ncpus;
#else
        ncpus = 1;
#endif
        if (thread_debug && virtual_cpu != 0)
                ncpus = virtual_cpu;

        /* Easier to just set it than to test and set. */
        PROC_LOCK(p);
        p->p_flag |= P_THREADED;
        PROC_UNLOCK(p);
        kg = td->td_ksegrp;
        if (uap->newgroup) {
                /* Racy, but cheap: the check is redone under sched_lock. */
                if (p->p_numksegrps >= max_groups_per_proc)
                        return (EPROCLIM);
                /*
                 * If we want a new KSEGRP it doesn't matter whether
                 * we have already fired up KSE mode before or not.
                 * We put the process in KSE mode and create a new KSEGRP.
                 */
                newkg = ksegrp_alloc();
                bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
                      kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
                mtx_lock_spin(&sched_lock);
                if (p->p_numksegrps >= max_groups_per_proc) {
                        mtx_unlock_spin(&sched_lock);
                        ksegrp_free(newkg);
                        return (EPROCLIM);
                }
                ksegrp_link(newkg, p);
                mtx_unlock_spin(&sched_lock);
        } else {
                newkg = kg;
        }

        /*
         * Creating more upcalls than there are physical cpus does not
         * improve performance.
         */
        if (newkg->kg_numupcalls >= ncpus)
                return (EPROCLIM);

        if (newkg->kg_numupcalls == 0) {
                /*
                 * Initialize the KSE group, optimized for MP.
                 * Create as many KSEs as there are physical cpus; this
                 * increases concurrency even if userland is not MP safe
                 * and can only run on a single CPU (true for early
                 * versions of libpthread).
                 * Ideally, every physical cpu should execute a thread.
                 * With enough KSEs, threads in the kernel can run in
                 * parallel on different cpus at full speed; kernel
                 * concurrency should not be restricted by the number of
                 * upcalls userland provides.  Adding more upcall
                 * structures only increases concurrency in userland.
                 * The highest-performance configuration is:
                 * N kses = N upcalls = N physical cpus.
                 */
                while (newkg->kg_kses < ncpus) {
                        newke = kse_alloc();
                        bzero(&newke->ke_startzero, RANGEOF(struct kse,
                              ke_startzero, ke_endzero));
#if 0
                        mtx_lock_spin(&sched_lock);
                        bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
                              RANGEOF(struct kse, ke_startcopy, ke_endcopy));
                        mtx_unlock_spin(&sched_lock);
#endif
                        mtx_lock_spin(&sched_lock);
                        kse_link(newke, newkg);
                        /* Add engine */
                        kse_reassign(newke);
                        mtx_unlock_spin(&sched_lock);
                }
        }
        newku = upcall_alloc();
        newku->ku_mailbox = uap->mbx;
        newku->ku_func = mbx.km_func;
        bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

        /* For the first call this may not have been set. */
        if (td->td_standin == NULL)
                thread_alloc_spare(td, NULL);

        mtx_lock_spin(&sched_lock);
        if (newkg->kg_numupcalls >= ncpus) {
                mtx_unlock_spin(&sched_lock);
                upcall_free(newku);
                return (EPROCLIM);
        }
        upcall_link(newku, newkg);
        if (mbx.km_quantum)
                newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

        /*
         * Each upcall structure has an owner thread; find which
         * one owns it.
         */
        if (uap->newgroup) {
                /*
                 * Because the new ksegrp has no thread yet,
                 * create an initial upcall thread to own it.
                 */
                thread_schedule_upcall(td, newku);
        } else {
                /*
                 * If the current thread has no upcall structure,
                 * just assign this upcall to it.
                 */
                if (td->td_upcall == NULL) {
                        newku->ku_owner = td;
                        td->td_upcall = newku;
                } else {
                        /*
                         * Otherwise create a new upcall thread to own it.
                         */
                        thread_schedule_upcall(td, newku);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

        get_mcontext(td, &uc->uc_mcontext, 0);
        PROC_LOCK(td->td_proc);
        uc->uc_sigmask = td->td_sigmask;
        PROC_UNLOCK(td->td_proc);
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
        int ret;

        ret = set_mcontext(td, &uc->uc_mcontext);
        if (ret == 0) {
                SIG_CANTMASK(uc->uc_sigmask);
                PROC_LOCK(td->td_proc);
                td->td_sigmask = uc->uc_sigmask;
                PROC_UNLOCK(td->td_proc);
        }
        return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, 0);
#else
        /*
         * XXX the ia64 kstack allocator is really lame and is at the mercy
         * of contigmalloc().  This hackery is to pre-construct a whole
         * pile of thread structures with associated kernel stacks early
         * in the system startup while contigmalloc() still works.  Once we
         * have them, keep them.  Sigh.
         */
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        uma_prealloc(thread_zone, 512); /* XXX arbitrary */
#endif
        ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
            NULL, NULL, ksegrp_init, NULL,
            UMA_ALIGN_CACHE, 0);
        kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
            NULL, NULL, kse_init, NULL,
            UMA_ALIGN_CACHE, 0);
        upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
            NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
        mtx_unlock_spin(&kse_zombie_lock);
}

838 * Reap zombie kse resource.
839 */
840void
841thread_reap(void)
842{
843 struct thread *td_first, *td_next;
844 struct kse *ke_first, *ke_next;
845 struct ksegrp *kg_first, * kg_next;
846 struct kse_upcall *ku_first, *ku_next;
847
848 /*
849 * Don't even bother to lock if none at this instant,
850 * we really don't care about the next instant..
851 */
852 if ((!TAILQ_EMPTY(&zombie_threads))
853 || (!TAILQ_EMPTY(&zombie_kses))
854 || (!TAILQ_EMPTY(&zombie_ksegrps))
855 || (!TAILQ_EMPTY(&zombie_upcalls))) {
856 mtx_lock_spin(&kse_zombie_lock);
857 td_first = TAILQ_FIRST(&zombie_threads);
858 ke_first = TAILQ_FIRST(&zombie_kses);
859 kg_first = TAILQ_FIRST(&zombie_ksegrps);
860 ku_first = TAILQ_FIRST(&zombie_upcalls);
861 if (td_first)
862 TAILQ_INIT(&zombie_threads);
863 if (ke_first)
864 TAILQ_INIT(&zombie_kses);
865 if (kg_first)
866 TAILQ_INIT(&zombie_ksegrps);
867 if (ku_first)
868 TAILQ_INIT(&zombie_upcalls);
869 mtx_unlock_spin(&kse_zombie_lock);
870 while (td_first) {
871 td_next = TAILQ_NEXT(td_first, td_runq);
872 if (td_first->td_ucred)
873 crfree(td_first->td_ucred);
874 thread_free(td_first);
875 td_first = td_next;
876 }
877 while (ke_first) {
878 ke_next = TAILQ_NEXT(ke_first, ke_procq);
879 kse_free(ke_first);
880 ke_first = ke_next;
881 }
882 while (kg_first) {
883 kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
884 ksegrp_free(kg_first);
885 kg_first = kg_next;
886 }
887 while (ku_first) {
888 ku_next = TAILQ_NEXT(ku_first, ku_link);
889 upcall_free(ku_first);
890 ku_first = ku_next;
891 }
892 }
893}
894
/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
        return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
        return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        thread_reap(); /* check if any zombies to get */
        return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
        uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
        uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        cpu_thread_clean(td);
        uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        uintptr_t mbx;
        void *addr;
        int error, temp;
        ucontext_t uc;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Export the user/machine context. */
        addr = (void *)(&td->td_mailbox->tm_context);
        error = copyin(addr, &uc, sizeof(ucontext_t));
        if (error)
                goto bad;

        thread_getcontext(td, &uc);
        error = copyout(&uc, addr, sizeof(ucontext_t));
        if (error)
                goto bad;

        /* Export clock ticks spent in kernel mode. */
        addr = (caddr_t)(&td->td_mailbox->tm_sticks);
        temp = fuword(addr) + td->td_usticks;
        if (suword(addr, temp))
                goto bad;

        /* Get the address of the list pointer in the latest mailbox. */
        addr = (void *)(&td->td_mailbox->tm_next);
        /*
         * Put the saved address of the previous first
         * entry into this one.
         */
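        /*
         * Optimistic update loop: store the current head of
         * kg_completed into the user-space tm_next field, then
         * re-check under the proc lock that the head has not changed.
         * If another thread completed meanwhile, retry the store.
         */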
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        error = EFAULT;
                        goto bad;
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = td->td_mailbox;
                        /*
                         * The thread context may be taken away by
                         * other upcall threads when we unlock the
                         * process lock; it is no longer valid to
                         * use it again anywhere else.
                         */
                        td->td_mailbox = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        td->td_usticks = 0;
        return (0);

bad:
        PROC_LOCK(p);
        psignal(p, SIGSEGV);
        PROC_UNLOCK(p);
        /* The mailbox is bad, don't use it. */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
        struct proc *p = kg->kg_proc;
        void *addr;
        uintptr_t mbx;

        addr = (void *)(&ku->ku_mailbox->km_completed);
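        /*
         * Same optimistic store-then-verify loop as in
         * thread_export_context(), moving the whole completed list
         * into the upcall mailbox in one step.
         */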
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (EFAULT);
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        return (0);
}

/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
        struct thread *td = curthread;

        if (td->td_ksegrp->kg_numupcalls == 0)
                return (-1);
        if (user) {
                /* Currently always done via ast(). */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
                mtx_unlock_spin(&sched_lock);
                td->td_uuticks++;
        } else {
                if (td->td_mailbox != NULL)
                        td->td_usticks++;
                else {
                        /* XXXKSE
                         * We will call thread_user_enter() for every
                         * kernel entry in the future, so if the thread
                         * mailbox is NULL it must be the UTS; don't
                         * account clock ticks for it.
                         */
                }
        }
        return (0);
}

/*
 * Export stat clock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
        struct proc *p = td->td_proc;
        struct kse_thr_mailbox *tmbx;
        struct kse_upcall *ku;
        struct ksegrp *kg;
        caddr_t addr;
        uint uticks;

        if ((ku = td->td_upcall) == NULL)
                return (-1);

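        /*
         * Fetch the user-visible "current thread" mailbox pointer;
         * NULL or -1 means there is no thread to account against.
         */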
        tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
        if ((tmbx == NULL) || (tmbx == (void *)-1))
                return (-1);
        if (user) {
                uticks = td->td_uuticks;
                td->td_uuticks = 0;
                addr = (caddr_t)&tmbx->tm_uticks;
        } else {
                uticks = td->td_usticks;
                td->td_usticks = 0;
                addr = (caddr_t)&tmbx->tm_sticks;
        }
        if (uticks) {
                if (suword(addr, uticks+fuword(addr))) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (-2);
                }
        }
        kg = td->td_ksegrp;
        if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
                mtx_lock_spin(&sched_lock);
                td->td_upcall->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
        }
        return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        struct thread *td;
        struct kse *ke;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        kg = td->td_ksegrp;
        p = td->td_proc;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        KASSERT(ke != NULL, ("thread exiting without a kse"));
        KASSERT(kg != NULL, ("thread exiting without a kse group"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR1(KTR_PROC, "thread_exit: thread %p", td);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        if (td->td_standin != NULL) {
                thread_stash(td->td_standin);
                td->td_standin = NULL;
        }

        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled.  Skip
         * all this stuff.
         */
        if (p->p_numthreads > 1) {
                thread_unlink(td);
                if (p->p_maxthrwaits)
                        wakeup(&p->p_numthreads);
                /*
                 * The test below is NOT true if we are the
                 * sole exiting thread; P_STOPPED_SINGLE is unset
                 * in exit1() once it is the only survivor.
                 */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }

                /*
                 * Because each upcall structure has an owner thread,
                 * and owner threads exit only when the process is
                 * exiting, an upcall to userland is no longer needed;
                 * deleting the upcall structure is safe here.
                 * Thus when all threads in a group have exited, all
                 * upcalls in the group are automatically freed.
                 */
                if (td->td_upcall)
                        upcall_remove(td);

                ke->ke_state = KES_UNQUEUED;
                ke->ke_thread = NULL;
                /*
                 * Decide what to do with the KSE attached to this thread.
                 */
                if (ke->ke_flags & KEF_EXIT)
                        kse_unlink(ke);
                else
                        kse_reassign(ke);
                PROC_UNLOCK(p);
                td->td_kse = NULL;
                td->td_state = TDS_INACTIVE;
#if 0
                td->td_proc = NULL;
#endif
                td->td_ksegrp = NULL;
                td->td_last_kse = NULL;
                PCPU_SET(deadthread, td);
        } else {
                PROC_UNLOCK(p);
        }
        /* XXX Shouldn't cpu_throw() here. */
        mtx_assert(&sched_lock, MA_OWNED);
29 */
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/smp.h>
39#include <sys/sysctl.h>
40#include <sys/sysproto.h>
41#include <sys/filedesc.h>
42#include <sys/sched.h>
43#include <sys/signalvar.h>
44#include <sys/sx.h>
45#include <sys/tty.h>
46#include <sys/user.h>
47#include <sys/jail.h>
48#include <sys/kse.h>
49#include <sys/ktr.h>
50#include <sys/ucontext.h>
51
52#include <vm/vm.h>
53#include <vm/vm_object.h>
54#include <vm/pmap.h>
55#include <vm/uma.h>
56#include <vm/vm_map.h>
57
58#include <machine/frame.h>
59
60/*
61 * KSEGRP related storage.
62 */
63static uma_zone_t ksegrp_zone;
64static uma_zone_t kse_zone;
65static uma_zone_t thread_zone;
66static uma_zone_t upcall_zone;
67
68/* DEBUG ONLY */
69SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
70static int thread_debug = 0;
71SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
72 &thread_debug, 0, "thread debug");
73
74static int max_threads_per_proc = 150;
75SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
76 &max_threads_per_proc, 0, "Limit on threads per proc");
77
78static int max_groups_per_proc = 50;
79SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
80 &max_groups_per_proc, 0, "Limit on thread groups per proc");
81
82static int max_threads_hits;
83SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
84 &max_threads_hits, 0, "");
85
86static int virtual_cpu;
87
88#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
89
90TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
91TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
92TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
93TAILQ_HEAD(, kse_upcall) zombie_upcalls =
94 TAILQ_HEAD_INITIALIZER(zombie_upcalls);
95struct mtx kse_zombie_lock;
96MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
97
98static void kse_purge(struct proc *p, struct thread *td);
99static void kse_purge_group(struct thread *td);
100static int thread_update_usr_ticks(struct thread *td, int user);
101static void thread_alloc_spare(struct thread *td, struct thread *spare);
102
103static int
104sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
105{
106 int error, new_val;
107 int def_val;
108
109#ifdef SMP
110 def_val = mp_ncpus;
111#else
112 def_val = 1;
113#endif
114 if (virtual_cpu == 0)
115 new_val = def_val;
116 else
117 new_val = virtual_cpu;
118 error = sysctl_handle_int(oidp, &new_val, 0, req);
119 if (error != 0 || req->newptr == NULL)
120 return (error);
121 if (new_val < 0)
122 return (EINVAL);
123 virtual_cpu = new_val;
124 return (0);
125}
126
127/* DEBUG ONLY */
128SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
129 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
130 "debug virtual cpus");
131
132/*
133 * Prepare a thread for use.
134 */
135static void
136thread_ctor(void *mem, int size, void *arg)
137{
138 struct thread *td;
139
140 td = (struct thread *)mem;
141 td->td_state = TDS_INACTIVE;
142 td->td_oncpu = NOCPU;
143}
144
145/*
146 * Reclaim a thread after use.
147 */
148static void
149thread_dtor(void *mem, int size, void *arg)
150{
151 struct thread *td;
152
153 td = (struct thread *)mem;
154
155#ifdef INVARIANTS
156 /* Verify that this thread is in a safe state to free. */
157 switch (td->td_state) {
158 case TDS_INHIBITED:
159 case TDS_RUNNING:
160 case TDS_CAN_RUN:
161 case TDS_RUNQ:
162 /*
163 * We must never unlink a thread that is in one of
164 * these states, because it is currently active.
165 */
166 panic("bad state for thread unlinking");
167 /* NOTREACHED */
168 case TDS_INACTIVE:
169 break;
170 default:
171 panic("bad thread state");
172 /* NOTREACHED */
173 }
174#endif
175}
176
177/*
178 * Initialize type-stable parts of a thread (when newly created).
179 */
180static void
181thread_init(void *mem, int size)
182{
183 struct thread *td;
184
185 td = (struct thread *)mem;
186 mtx_lock(&Giant);
187 pmap_new_thread(td, 0);
188 mtx_unlock(&Giant);
189 cpu_thread_setup(td);
190 td->td_sched = (struct td_sched *)&td[1];
191}
192
193/*
194 * Tear down type-stable parts of a thread (just before being discarded).
195 */
196static void
197thread_fini(void *mem, int size)
198{
199 struct thread *td;
200
201 td = (struct thread *)mem;
202 pmap_dispose_thread(td);
203}
204
205/*
206 * Initialize type-stable parts of a kse (when newly created).
207 */
208static void
209kse_init(void *mem, int size)
210{
211 struct kse *ke;
212
213 ke = (struct kse *)mem;
214 ke->ke_sched = (struct ke_sched *)&ke[1];
215}
216
217/*
218 * Initialize type-stable parts of a ksegrp (when newly created).
219 */
220static void
221ksegrp_init(void *mem, int size)
222{
223 struct ksegrp *kg;
224
225 kg = (struct ksegrp *)mem;
226 kg->kg_sched = (struct kg_sched *)&kg[1];
227}
228
229/*
230 * KSE is linked into kse group.
231 */
232void
233kse_link(struct kse *ke, struct ksegrp *kg)
234{
235 struct proc *p = kg->kg_proc;
236
237 TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
238 kg->kg_kses++;
239 ke->ke_state = KES_UNQUEUED;
240 ke->ke_proc = p;
241 ke->ke_ksegrp = kg;
242 ke->ke_thread = NULL;
243 ke->ke_oncpu = NOCPU;
244 ke->ke_flags = 0;
245}
246
247void
248kse_unlink(struct kse *ke)
249{
250 struct ksegrp *kg;
251
252 mtx_assert(&sched_lock, MA_OWNED);
253 kg = ke->ke_ksegrp;
254 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
255 if (ke->ke_state == KES_IDLE) {
256 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
257 kg->kg_idle_kses--;
258 }
259 if (--kg->kg_kses == 0)
260 ksegrp_unlink(kg);
261 /*
262 * Aggregate stats from the KSE
263 */
264 kse_stash(ke);
265}
266
267void
268ksegrp_link(struct ksegrp *kg, struct proc *p)
269{
270
271 TAILQ_INIT(&kg->kg_threads);
272 TAILQ_INIT(&kg->kg_runq); /* links with td_runq */
273 TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */
274 TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */
275 TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */
276 TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */
277 kg->kg_proc = p;
278 /*
279 * the following counters are in the -zero- section
280 * and may not need clearing
281 */
282 kg->kg_numthreads = 0;
283 kg->kg_runnable = 0;
284 kg->kg_kses = 0;
285 kg->kg_runq_kses = 0; /* XXXKSE change name */
286 kg->kg_idle_kses = 0;
287 kg->kg_numupcalls = 0;
288 /* link it in now that it's consistent */
289 p->p_numksegrps++;
290 TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
291}
292
293void
294ksegrp_unlink(struct ksegrp *kg)
295{
296 struct proc *p;
297
298 mtx_assert(&sched_lock, MA_OWNED);
299 KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
300 KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
301 KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));
302
303 p = kg->kg_proc;
304 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
305 p->p_numksegrps--;
306 /*
307 * Aggregate stats from the KSE
308 */
309 ksegrp_stash(kg);
310}
311
312struct kse_upcall *
313upcall_alloc(void)
314{
315 struct kse_upcall *ku;
316
317 ku = uma_zalloc(upcall_zone, M_WAITOK);
318 bzero(ku, sizeof(*ku));
319 return (ku);
320}
321
322void
323upcall_free(struct kse_upcall *ku)
324{
325
326 uma_zfree(upcall_zone, ku);
327}
328
329void
330upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
331{
332
333 mtx_assert(&sched_lock, MA_OWNED);
334 TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
335 ku->ku_ksegrp = kg;
336 kg->kg_numupcalls++;
337}
338
339void
340upcall_unlink(struct kse_upcall *ku)
341{
342 struct ksegrp *kg = ku->ku_ksegrp;
343
344 mtx_assert(&sched_lock, MA_OWNED);
345 KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
346 TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
347 kg->kg_numupcalls--;
348 upcall_stash(ku);
349}
350
351void
352upcall_remove(struct thread *td)
353{
354
355 if (td->td_upcall) {
356 td->td_upcall->ku_owner = NULL;
357 upcall_unlink(td->td_upcall);
358 td->td_upcall = 0;
359 }
360}
361
362/*
363 * For a newly created process,
364 * link up all the structures and its initial threads etc.
365 */
366void
367proc_linkup(struct proc *p, struct ksegrp *kg,
368 struct kse *ke, struct thread *td)
369{
370
371 TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
372 TAILQ_INIT(&p->p_threads); /* all threads in proc */
373 TAILQ_INIT(&p->p_suspended); /* Threads suspended */
374 p->p_numksegrps = 0;
375 p->p_numthreads = 0;
376
377 ksegrp_link(kg, p);
378 kse_link(ke, kg);
379 thread_link(td, kg);
380}
381
382/*
383struct kse_thr_interrupt_args {
384 struct kse_thr_mailbox * tmbx;
385};
386*/
387int
388kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
389{
390 struct proc *p;
391 struct thread *td2;
392
393 p = td->td_proc;
394 if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
395 return (EINVAL);
396 mtx_lock_spin(&sched_lock);
397 FOREACH_THREAD_IN_PROC(p, td2) {
398 if (td2->td_mailbox == uap->tmbx) {
399 td2->td_flags |= TDF_INTERRUPT;
400 if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
401 if (td2->td_flags & TDF_CVWAITQ)
402 cv_abort(td2);
403 else
404 abortsleep(td2);
405 }
406 mtx_unlock_spin(&sched_lock);
407 return (0);
408 }
409 }
410 mtx_unlock_spin(&sched_lock);
411 return (ESRCH);
412}
413
414/*
415struct kse_exit_args {
416 register_t dummy;
417};
418*/
419int
420kse_exit(struct thread *td, struct kse_exit_args *uap)
421{
422 struct proc *p;
423 struct ksegrp *kg;
424 struct kse *ke;
425
426 p = td->td_proc;
427 if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
428 return (EINVAL);
429 kg = td->td_ksegrp;
430 /* Serialize removing upcall */
431 PROC_LOCK(p);
432 mtx_lock_spin(&sched_lock);
433 if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
434 mtx_unlock_spin(&sched_lock);
435 PROC_UNLOCK(p);
436 return (EDEADLK);
437 }
438 ke = td->td_kse;
439 upcall_remove(td);
440 if (p->p_numthreads == 1) {
441 kse_purge(p, td);
442 p->p_flag &= ~P_THREADED;
443 mtx_unlock_spin(&sched_lock);
444 PROC_UNLOCK(p);
445 } else {
446 if (kg->kg_numthreads == 1) { /* Shutdown a group */
447 kse_purge_group(td);
448 ke->ke_flags |= KEF_EXIT;
449 }
450 thread_stopped(p);
451 thread_exit();
452 /* NOTREACHED */
453 }
454 return (0);
455}
456
457/*
458 * Either becomes an upcall or waits for an awakening event and
459 * then becomes an upcall. Only error cases return.
460 */
461/*
462struct kse_release_args {
463 struct timespec *timeout;
464};
465*/
466int
467kse_release(struct thread *td, struct kse_release_args *uap)
468{
469 struct proc *p;
470 struct ksegrp *kg;
471 struct timespec ts, ts2, ts3, timeout;
472 struct timeval tv;
473 int error;
474
475 p = td->td_proc;
476 kg = td->td_ksegrp;
477 if (td->td_upcall == NULL || TD_CAN_UNBIND(td))
478 return (EINVAL);
479 if (uap->timeout != NULL) {
480 if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
481 return (error);
482 getnanouptime(&ts);
483 timespecadd(&ts, &timeout);
484 TIMESPEC_TO_TIMEVAL(&tv, &timeout);
485 }
486 mtx_lock_spin(&sched_lock);
487 /* Change OURSELF to become an upcall. */
488 td->td_flags = TDF_UPCALLING;
489#if 0 /* XXX This shouldn't be necessary */
490 if (p->p_sflag & PS_NEEDSIGCHK)
491 td->td_flags |= TDF_ASTPENDING;
492#endif
493 mtx_unlock_spin(&sched_lock);
494 PROC_LOCK(p);
495 while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
496 (kg->kg_completed == NULL)) {
497 kg->kg_upsleeps++;
498 error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
499 "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
500 kg->kg_upsleeps--;
501 PROC_UNLOCK(p);
502 if (uap->timeout == NULL || error != EWOULDBLOCK)
503 return (0);
504 getnanouptime(&ts2);
505 if (timespeccmp(&ts2, &ts, >=))
506 return (0);
507 ts3 = ts;
508 timespecsub(&ts3, &ts2);
509 TIMESPEC_TO_TIMEVAL(&tv, &ts3);
510 PROC_LOCK(p);
511 }
512 PROC_UNLOCK(p);
513 return (0);
514}
515
516/* struct kse_wakeup_args {
517 struct kse_mailbox *mbx;
518}; */
519int
520kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
521{
522 struct proc *p;
523 struct ksegrp *kg;
524 struct kse_upcall *ku;
525 struct thread *td2;
526
527 p = td->td_proc;
528 td2 = NULL;
529 ku = NULL;
530 /* KSE-enabled processes only, please. */
531 if (!(p->p_flag & P_THREADED))
532 return (EINVAL);
533 PROC_LOCK(p);
534 mtx_lock_spin(&sched_lock);
535 if (uap->mbx) {
536 FOREACH_KSEGRP_IN_PROC(p, kg) {
537 FOREACH_UPCALL_IN_GROUP(kg, ku) {
538 if (ku->ku_mailbox == uap->mbx)
539 break;
540 }
541 if (ku)
542 break;
543 }
544 } else {
545 kg = td->td_ksegrp;
546 if (kg->kg_upsleeps) {
547 wakeup_one(&kg->kg_completed);
548 mtx_unlock_spin(&sched_lock);
549 PROC_UNLOCK(p);
550 return (0);
551 }
552 ku = TAILQ_FIRST(&kg->kg_upcalls);
553 }
554 if (ku) {
555 if ((td2 = ku->ku_owner) == NULL) {
556 panic("%s: no owner", __func__);
557 } else if (TD_ON_SLEEPQ(td2) &&
558 (td2->td_wchan == &kg->kg_completed)) {
559 abortsleep(td2);
560 } else {
561 ku->ku_flags |= KUF_DOUPCALL;
562 }
563 mtx_unlock_spin(&sched_lock);
564 PROC_UNLOCK(p);
565 return (0);
566 }
567 mtx_unlock_spin(&sched_lock);
568 PROC_UNLOCK(p);
569 return (ESRCH);
570}
571
572/*
573 * No new KSEG: first call: use current KSE, don't schedule an upcall
574 * All other situations, do allocate max new KSEs and schedule an upcall.
575 */
576/* struct kse_create_args {
577 struct kse_mailbox *mbx;
578 int newgroup;
579}; */
580int
581kse_create(struct thread *td, struct kse_create_args *uap)
582{
583 struct kse *newke;
584 struct ksegrp *newkg;
585 struct ksegrp *kg;
586 struct proc *p;
587 struct kse_mailbox mbx;
588 struct kse_upcall *newku;
589 int err, ncpus;
590
591 p = td->td_proc;
592 if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
593 return (err);
594
595 /* Too bad, why hasn't kernel always a cpu counter !? */
596#ifdef SMP
597 ncpus = mp_ncpus;
598#else
599 ncpus = 1;
600#endif
601 if (thread_debug && virtual_cpu != 0)
602 ncpus = virtual_cpu;
603
604 /* Easier to just set it than to test and set */
605 PROC_LOCK(p);
606 p->p_flag |= P_THREADED;
607 PROC_UNLOCK(p);
608 kg = td->td_ksegrp;
609 if (uap->newgroup) {
610 /* Have race condition but it is cheap */
611 if (p->p_numksegrps >= max_groups_per_proc)
612 return (EPROCLIM);
613 /*
614 * If we want a new KSEGRP it doesn't matter whether
615 * we have already fired up KSE mode before or not.
616 * We put the process in KSE mode and create a new KSEGRP.
617 */
618 newkg = ksegrp_alloc();
619 bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
620 kg_startzero, kg_endzero));
621 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
622 RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
623 mtx_lock_spin(&sched_lock);
624 if (p->p_numksegrps >= max_groups_per_proc) {
625 mtx_unlock_spin(&sched_lock);
626 ksegrp_free(newkg);
627 return (EPROCLIM);
628 }
629 ksegrp_link(newkg, p);
630 mtx_unlock_spin(&sched_lock);
631 } else {
632 newkg = kg;
633 }
634
635 /*
636 * Creating upcalls more than number of physical cpu does
637 * not help performance.
638 */
639 if (newkg->kg_numupcalls >= ncpus)
640 return (EPROCLIM);
641
642 if (newkg->kg_numupcalls == 0) {
643 /*
644 * Initialize KSE group, optimized for MP.
645 * Create KSEs as many as physical cpus, this increases
646 * concurrent even if userland is not MP safe and can only run
647 * on single CPU (for early version of libpthread, it is true).
648 * In ideal world, every physical cpu should execute a thread.
649 * If there is enough KSEs, threads in kernel can be
650 * executed parallel on different cpus with full speed,
651 * Concurrent in kernel shouldn't be restricted by number of
652 * upcalls userland provides.
653 * Adding more upcall structures only increases concurrent
654 * in userland.
655 * Highest performance configuration is:
656 * N kses = N upcalls = N phyiscal cpus
657 */
658 while (newkg->kg_kses < ncpus) {
659 newke = kse_alloc();
660 bzero(&newke->ke_startzero, RANGEOF(struct kse,
661 ke_startzero, ke_endzero));
662#if 0
663 mtx_lock_spin(&sched_lock);
664 bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
665 RANGEOF(struct kse, ke_startcopy, ke_endcopy));
666 mtx_unlock_spin(&sched_lock);
667#endif
668 mtx_lock_spin(&sched_lock);
669 kse_link(newke, newkg);
670 /* Add engine */
671 kse_reassign(newke);
672 mtx_unlock_spin(&sched_lock);
673 }
674 }
675 newku = upcall_alloc();
676 newku->ku_mailbox = uap->mbx;
677 newku->ku_func = mbx.km_func;
678 bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
679
680 /* For the first call this may not have been set */
681 if (td->td_standin == NULL)
682 thread_alloc_spare(td, NULL);
683
684 mtx_lock_spin(&sched_lock);
685 if (newkg->kg_numupcalls >= ncpus) {
686 mtx_unlock_spin(&sched_lock);
687 upcall_free(newku);
688 return (EPROCLIM);
689 }
690 upcall_link(newku, newkg);
691 if (mbx.km_quantum)
692 newkg->kg_upquantum = max(1, mbx.km_quantum/tick);
693
694 /*
695 * Each upcall structure has an owner thread, find which
696 * one owns it.
697 */
698 if (uap->newgroup) {
699 /*
700 * Because new ksegrp hasn't thread,
701 * create an initial upcall thread to own it.
702 */
703 thread_schedule_upcall(td, newku);
704 } else {
705 /*
706 * If current thread hasn't an upcall structure,
707 * just assign the upcall to it.
708 */
709 if (td->td_upcall == NULL) {
710 newku->ku_owner = td;
711 td->td_upcall = newku;
712 } else {
713 /*
714 * Create a new upcall thread to own it.
715 */
716 thread_schedule_upcall(td, newku);
717 }
718 }
719 mtx_unlock_spin(&sched_lock);
720 return (0);
721}
722
723/*
724 * Fill a ucontext_t with a thread's context information.
725 *
726 * This is an analogue to getcontext(3).
727 */
728void
729thread_getcontext(struct thread *td, ucontext_t *uc)
730{
731
732 get_mcontext(td, &uc->uc_mcontext, 0);
733 PROC_LOCK(td->td_proc);
734 uc->uc_sigmask = td->td_sigmask;
735 PROC_UNLOCK(td->td_proc);
736}
737
738/*
739 * Set a thread's context from a ucontext_t.
740 *
741 * This is an analogue to setcontext(3).
742 */
743int
744thread_setcontext(struct thread *td, ucontext_t *uc)
745{
746 int ret;
747
748 ret = set_mcontext(td, &uc->uc_mcontext);
749 if (ret == 0) {
750 SIG_CANTMASK(uc->uc_sigmask);
751 PROC_LOCK(td->td_proc);
752 td->td_sigmask = uc->uc_sigmask;
753 PROC_UNLOCK(td->td_proc);
754 }
755 return (ret);
756}
757
758/*
759 * Initialize global thread allocation resources.
760 */
761void
762threadinit(void)
763{
764
765#ifndef __ia64__
766 thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
767 thread_ctor, thread_dtor, thread_init, thread_fini,
768 UMA_ALIGN_CACHE, 0);
769#else
770 /*
771 * XXX the ia64 kstack allocator is really lame and is at the mercy
772 * of contigmallloc(). This hackery is to pre-construct a whole
773 * pile of thread structures with associated kernel stacks early
774 * in the system startup while contigmalloc() still works. Once we
775 * have them, keep them. Sigh.
776 */
777 thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
778 thread_ctor, thread_dtor, thread_init, thread_fini,
779 UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
780 uma_prealloc(thread_zone, 512); /* XXX arbitary */
781#endif
782 ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
783 NULL, NULL, ksegrp_init, NULL,
784 UMA_ALIGN_CACHE, 0);
785 kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
786 NULL, NULL, kse_init, NULL,
787 UMA_ALIGN_CACHE, 0);
788 upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
789 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
790}
791
792/*
793 * Stash an embarasingly extra thread into the zombie thread queue.
794 */
795void
796thread_stash(struct thread *td)
797{
798 mtx_lock_spin(&kse_zombie_lock);
799 TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
800 mtx_unlock_spin(&kse_zombie_lock);
801}
802
803/*
804 * Stash an embarasingly extra kse into the zombie kse queue.
805 */
806void
807kse_stash(struct kse *ke)
808{
809 mtx_lock_spin(&kse_zombie_lock);
810 TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
811 mtx_unlock_spin(&kse_zombie_lock);
812}
813
814/*
815 * Stash an embarasingly extra upcall into the zombie upcall queue.
816 */
817
818void
819upcall_stash(struct kse_upcall *ku)
820{
821 mtx_lock_spin(&kse_zombie_lock);
822 TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
823 mtx_unlock_spin(&kse_zombie_lock);
824}
825
826/*
827 * Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
828 */
829void
830ksegrp_stash(struct ksegrp *kg)
831{
832 mtx_lock_spin(&kse_zombie_lock);
833 TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
834 mtx_unlock_spin(&kse_zombie_lock);
835}
836
837/*
838 * Reap zombie kse resource.
839 */
840void
841thread_reap(void)
842{
843 struct thread *td_first, *td_next;
844 struct kse *ke_first, *ke_next;
845 struct ksegrp *kg_first, * kg_next;
846 struct kse_upcall *ku_first, *ku_next;
847
848 /*
849 * Don't even bother to lock if none at this instant,
850 * we really don't care about the next instant..
851 */
852 if ((!TAILQ_EMPTY(&zombie_threads))
853 || (!TAILQ_EMPTY(&zombie_kses))
854 || (!TAILQ_EMPTY(&zombie_ksegrps))
855 || (!TAILQ_EMPTY(&zombie_upcalls))) {
856 mtx_lock_spin(&kse_zombie_lock);
857 td_first = TAILQ_FIRST(&zombie_threads);
858 ke_first = TAILQ_FIRST(&zombie_kses);
859 kg_first = TAILQ_FIRST(&zombie_ksegrps);
860 ku_first = TAILQ_FIRST(&zombie_upcalls);
861 if (td_first)
862 TAILQ_INIT(&zombie_threads);
863 if (ke_first)
864 TAILQ_INIT(&zombie_kses);
865 if (kg_first)
866 TAILQ_INIT(&zombie_ksegrps);
867 if (ku_first)
868 TAILQ_INIT(&zombie_upcalls);
869 mtx_unlock_spin(&kse_zombie_lock);
870 while (td_first) {
871 td_next = TAILQ_NEXT(td_first, td_runq);
872 if (td_first->td_ucred)
873 crfree(td_first->td_ucred);
874 thread_free(td_first);
875 td_first = td_next;
876 }
877 while (ke_first) {
878 ke_next = TAILQ_NEXT(ke_first, ke_procq);
879 kse_free(ke_first);
880 ke_first = ke_next;
881 }
882 while (kg_first) {
883 kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
884 ksegrp_free(kg_first);
885 kg_first = kg_next;
886 }
887 while (ku_first) {
888 ku_next = TAILQ_NEXT(ku_first, ku_link);
889 upcall_free(ku_first);
890 ku_first = ku_next;
891 }
892 }
893}
894
895/*
896 * Allocate a ksegrp.
897 */
898struct ksegrp *
899ksegrp_alloc(void)
900{
901 return (uma_zalloc(ksegrp_zone, M_WAITOK));
902}
903
904/*
905 * Allocate a kse.
906 */
907struct kse *
908kse_alloc(void)
909{
910 return (uma_zalloc(kse_zone, M_WAITOK));
911}
912
913/*
914 * Allocate a thread.
915 */
916struct thread *
917thread_alloc(void)
918{
919 thread_reap(); /* check if any zombies to get */
920 return (uma_zalloc(thread_zone, M_WAITOK));
921}
922
923/*
924 * Deallocate a ksegrp.
925 */
926void
927ksegrp_free(struct ksegrp *td)
928{
929 uma_zfree(ksegrp_zone, td);
930}
931
932/*
933 * Deallocate a kse.
934 */
935void
936kse_free(struct kse *td)
937{
938 uma_zfree(kse_zone, td);
939}
940
941/*
942 * Deallocate a thread.
943 */
944void
945thread_free(struct thread *td)
946{
947
948 cpu_thread_clean(td);
949 uma_zfree(thread_zone, td);
950}
951
952/*
953 * Store the thread context in the UTS's mailbox.
954 * then add the mailbox at the head of a list we are building in user space.
955 * The list is anchored in the ksegrp structure.
956 */
957int
958thread_export_context(struct thread *td)
959{
960 struct proc *p;
961 struct ksegrp *kg;
962 uintptr_t mbx;
963 void *addr;
964 int error,temp;
965 ucontext_t uc;
966
967 p = td->td_proc;
968 kg = td->td_ksegrp;
969
970 /* Export the user/machine context. */
971 addr = (void *)(&td->td_mailbox->tm_context);
972 error = copyin(addr, &uc, sizeof(ucontext_t));
973 if (error)
974 goto bad;
975
976 thread_getcontext(td, &uc);
977 error = copyout(&uc, addr, sizeof(ucontext_t));
978 if (error)
979 goto bad;
980
981	/* Export clock ticks spent in kernel mode. */
982 addr = (caddr_t)(&td->td_mailbox->tm_sticks);
983 temp = fuword(addr) + td->td_usticks;
984 if (suword(addr, temp))
985 goto bad;
986
987 /* Get address in latest mbox of list pointer */
988 addr = (void *)(&td->td_mailbox->tm_next);
989 /*
990 * Put the saved address of the previous first
991 * entry into this one
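	 * The update is done lock-free: write the current list head
	 * into the user mailbox, then re-check kg_completed under the
	 * proc lock; if another thread changed it in between, retry.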
992 */
993 for (;;) {
994 mbx = (uintptr_t)kg->kg_completed;
995 if (suword(addr, mbx)) {
996 error = EFAULT;
997 goto bad;
998 }
999 PROC_LOCK(p);
1000 if (mbx == (uintptr_t)kg->kg_completed) {
1001 kg->kg_completed = td->td_mailbox;
1002 /*
1003			 * The thread context may be taken away by
1004			 * other upcall threads when we unlock the
1005			 * process lock. It is no longer valid to
1006			 * use it again anywhere else.
1007 */
1008 td->td_mailbox = NULL;
1009 PROC_UNLOCK(p);
1010 break;
1011 }
1012 PROC_UNLOCK(p);
1013 }
1014 td->td_usticks = 0;
1015 return (0);
1016
1017bad:
1018 PROC_LOCK(p);
1019 psignal(p, SIGSEGV);
1020 PROC_UNLOCK(p);
1021 /* The mailbox is bad, don't use it */
1022 td->td_mailbox = NULL;
1023 td->td_usticks = 0;
1024 return (error);
1025}
1026
1027/*
1028 * Take the list of completed mailboxes for this KSEGRP and put them on this
1029 * upcall's mailbox as it's the next one going up.
1030 */
1031static int
1032thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
1033{
1034 struct proc *p = kg->kg_proc;
1035 void *addr;
1036 uintptr_t mbx;
1037
1038 addr = (void *)(&ku->ku_mailbox->km_completed);
1039 for (;;) {
1040 mbx = (uintptr_t)kg->kg_completed;
1041 if (suword(addr, mbx)) {
1042 PROC_LOCK(p);
1043 psignal(p, SIGSEGV);
1044 PROC_UNLOCK(p);
1045 return (EFAULT);
1046 }
1047 PROC_LOCK(p);
1048 if (mbx == (uintptr_t)kg->kg_completed) {
1049 kg->kg_completed = NULL;
1050 PROC_UNLOCK(p);
1051 break;
1052 }
1053 PROC_UNLOCK(p);
1054 }
1055 return (0);
1056}
1057
1058/*
1059 * This function should be called at statclock interrupt time
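 * Returns -1 if the ksegrp is not threaded (has no upcalls).
 * Userland ticks are accumulated in td_uuticks and exported later
 * via ast(); kernel ticks are charged to td_usticks only when the
 * thread has a mailbox (i.e. it is not the UTS itself).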
1060 */
1061int
1062thread_statclock(int user)
1063{
1064 struct thread *td = curthread;
1065
1066 if (td->td_ksegrp->kg_numupcalls == 0)
1067 return (-1);
1068 if (user) {
1069		/* Currently always done via ast(). */
1070 mtx_lock_spin(&sched_lock);
1071 td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
1072 mtx_unlock_spin(&sched_lock);
1073 td->td_uuticks++;
1074 } else {
1075 if (td->td_mailbox != NULL)
1076 td->td_usticks++;
1077 else {
1078 /* XXXKSE
1079			 * We will call thread_user_enter() for every
1080			 * kernel entry in the future, so if the thread mailbox
1081			 * is NULL, it must be a UTS kernel thread; don't account
1082			 * clock ticks for it.
1083 */
1084 }
1085 }
1086 return (0);
1087}
1088
1089/*
1090 * Export stat clock ticks for userland.
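 * Fold the accumulated tick counts into the thread mailbox and,
 * if the upcall quantum has expired, request an upcall.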
1091 */
1092static int
1093thread_update_usr_ticks(struct thread *td, int user)
1094{
1095 struct proc *p = td->td_proc;
1096 struct kse_thr_mailbox *tmbx;
1097 struct kse_upcall *ku;
1098 struct ksegrp *kg;
1099 caddr_t addr;
1100 uint uticks;
1101
1102 if ((ku = td->td_upcall) == NULL)
1103 return (-1);
1104
1105 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1106 if ((tmbx == NULL) || (tmbx == (void *)-1))
1107 return (-1);
1108 if (user) {
1109 uticks = td->td_uuticks;
1110 td->td_uuticks = 0;
1111 addr = (caddr_t)&tmbx->tm_uticks;
1112 } else {
1113 uticks = td->td_usticks;
1114 td->td_usticks = 0;
1115 addr = (caddr_t)&tmbx->tm_sticks;
1116 }
1117 if (uticks) {
1118 if (suword(addr, uticks+fuword(addr))) {
1119 PROC_LOCK(p);
1120 psignal(p, SIGSEGV);
1121 PROC_UNLOCK(p);
1122 return (-2);
1123 }
1124 }
1125 kg = td->td_ksegrp;
1126 if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
1127 mtx_lock_spin(&sched_lock);
1128 td->td_upcall->ku_flags |= KUF_DOUPCALL;
1129 mtx_unlock_spin(&sched_lock);
1130 }
1131 return (0);
1132}
1133
1134/*
1135 * Discard the current thread and exit from its context.
1136 *
1137 * Because we can't free a thread while we're operating under its context,
1138 * push the current thread into our CPU's deadthread holder. This means
1139 * we needn't worry about someone else grabbing our context before we
1140 * do a cpu_throw().
1141 */
1142void
1143thread_exit(void)
1144{
1145 struct thread *td;
1146 struct kse *ke;
1147 struct proc *p;
1148 struct ksegrp *kg;
1149
1150 td = curthread;
1151 kg = td->td_ksegrp;
1152 p = td->td_proc;
1153 ke = td->td_kse;
1154
1155 mtx_assert(&sched_lock, MA_OWNED);
1156 KASSERT(p != NULL, ("thread exiting without a process"));
1157 KASSERT(ke != NULL, ("thread exiting without a kse"));
1158 KASSERT(kg != NULL, ("thread exiting without a kse group"));
1159 PROC_LOCK_ASSERT(p, MA_OWNED);
1160 CTR1(KTR_PROC, "thread_exit: thread %p", td);
1161 KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
1162
1163 if (td->td_standin != NULL) {
1164 thread_stash(td->td_standin);
1165 td->td_standin = NULL;
1166 }
1167
1168 cpu_thread_exit(td); /* XXXSMP */
1169
1170 /*
1171 * The last thread is left attached to the process
1172	 * so that the whole bundle gets recycled. Skip
1173 * all this stuff.
1174 */
1175 if (p->p_numthreads > 1) {
1176 thread_unlink(td);
1177 if (p->p_maxthrwaits)
1178 wakeup(&p->p_numthreads);
1179 /*
1180 * The test below is NOT true if we are the
1181		 * sole exiting thread. P_STOPPED_SINGLE is unset
1182		 * in exit1() once it is the only survivor.
1183 */
1184 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1185 if (p->p_numthreads == p->p_suspcount) {
1186 thread_unsuspend_one(p->p_singlethread);
1187 }
1188 }
1189
1190 /*
1191		 * Because each upcall structure has an owner thread,
1192		 * and the owner thread exits only when the process is
1193		 * exiting, upcalls to userland are no longer needed
1194		 * and it is safe to delete the upcall structure here.
1195		 * Thus, when all threads in a group have exited, all
1196		 * upcalls in the group will have been freed.
1197 */
1198 if (td->td_upcall)
1199 upcall_remove(td);
1200
1201 ke->ke_state = KES_UNQUEUED;
1202 ke->ke_thread = NULL;
1203 /*
1204 * Decide what to do with the KSE attached to this thread.
1205 */
1206 if (ke->ke_flags & KEF_EXIT)
1207 kse_unlink(ke);
1208 else
1209 kse_reassign(ke);
1210 PROC_UNLOCK(p);
1211 td->td_kse = NULL;
1212 td->td_state = TDS_INACTIVE;
1213#if 0
1214 td->td_proc = NULL;
1215#endif
1216 td->td_ksegrp = NULL;
1217 td->td_last_kse = NULL;
1218 PCPU_SET(deadthread, td);
1219 } else {
1220 PROC_UNLOCK(p);
1221 }
1222 /* XXX Shouldn't cpu_throw() here. */
1223 mtx_assert(&sched_lock, MA_OWNED);
1224#if !defined(__alpha__) && !defined(__powerpc__)
1225 cpu_throw(td, choosethread());
1226#else
1227 cpu_throw();
1228#endif
1229 panic("I'm a teapot!");
1230 /* NOTREACHED */
1231}
1232
1233/*
1234 * Do any thread-specific cleanups that may be needed in wait().
1235 * Called with Giant held; the proc and sched locks are not held.
1236 */
1237void
1238thread_wait(struct proc *p)
1239{
1240 struct thread *td;
1241
1242 KASSERT((p->p_numthreads == 1), ("Muliple threads in wait1()"));
1243 KASSERT((p->p_numksegrps == 1), ("Muliple ksegrps in wait1()"));
1244 FOREACH_THREAD_IN_PROC(p, td) {
1245 if (td->td_standin != NULL) {
1246 thread_free(td->td_standin);
1247 td->td_standin = NULL;
1248 }
1249 cpu_thread_clean(td);
1250 }
1251 thread_reap(); /* check for zombie threads etc. */
1252}
1253
1254/*
1255 * Link a thread to a process.
1256 * Set up anything that needs to be initialized for it to
1257 * be used by the process.
1258 *
1259 * Note that we do not link to the proc's ucred here.
1260 * The thread is linked as if running but no KSE assigned.
1261 */
1262void
1263thread_link(struct thread *td, struct ksegrp *kg)
1264{
1265 struct proc *p;
1266
1267 p = kg->kg_proc;
1268 td->td_state = TDS_INACTIVE;
1269 td->td_proc = p;
1270 td->td_ksegrp = kg;
1271 td->td_last_kse = NULL;
1272 td->td_flags = 0;
1273 td->td_kse = NULL;
1274
1275 LIST_INIT(&td->td_contested);
1276 callout_init(&td->td_slpcallout, 1);
1277 TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
1278 TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
1279 p->p_numthreads++;
1280 kg->kg_numthreads++;
1281}
1282
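/*
 * Unlink a thread from its process and ksegrp.
 * Called with sched_lock held; the thread itself is not freed here.
 */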
1283void
1284thread_unlink(struct thread *td)
1285{
1286 struct proc *p = td->td_proc;
1287 struct ksegrp *kg = td->td_ksegrp;
1288
1289 mtx_assert(&sched_lock, MA_OWNED);
1290 TAILQ_REMOVE(&p->p_threads, td, td_plist);
1291 p->p_numthreads--;
1292 TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
1293 kg->kg_numthreads--;
1294 /* could clear a few other things here */
1295}
1296
1297/*
1298 * Purge a ksegrp resource. When a ksegrp is preparing to
1299 * exit, it calls this function.
1300 */
1301static void
1302kse_purge_group(struct thread *td)
1303{
1304 struct ksegrp *kg;
1305 struct kse *ke;
1306
1307 kg = td->td_ksegrp;
1308 KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
1309 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1310 KASSERT(ke->ke_state == KES_IDLE,
1311 ("%s: wrong idle KSE state", __func__));
1312 kse_unlink(ke);
1313 }
1314 KASSERT((kg->kg_kses == 1),
1315 ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
1316 KASSERT((kg->kg_numupcalls == 0),
1317 ("%s: ksegrp still has %d upcall datas",
1318 __func__, kg->kg_numupcalls));
1319}
1320
1321/*
1322 * Purge a process's KSE resource. When a process is preparing to
1323 * exit, it calls kse_purge to release any extra KSE resources in
1324 * the process.
1325 */
1326static void
1327kse_purge(struct proc *p, struct thread *td)
1328{
1329 struct ksegrp *kg;
1330 struct kse *ke;
1331
1332 KASSERT(p->p_numthreads == 1, ("bad thread number"));
1333 while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
1334 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
1335 p->p_numksegrps--;
1336 /*
1337		 * KSEs have no owners; after all threads in the
1338		 * group have exited, some KSEs may have been left
1339		 * on the idle queue, so garbage collect them now.
1340 */
1341 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
1342 KASSERT(ke->ke_state == KES_IDLE,
1343 ("%s: wrong idle KSE state", __func__));
1344 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
1345 kg->kg_idle_kses--;
1346 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
1347 kg->kg_kses--;
1348 kse_stash(ke);
1349 }
1350 KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
1351 ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
1352 ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
1353 KASSERT((kg->kg_numupcalls == 0),
1354 ("%s: ksegrp still has %d upcall datas",
1355 __func__, kg->kg_numupcalls));
1356
1357 if (kg != td->td_ksegrp)
1358 ksegrp_stash(kg);
1359 }
1360 TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
1361 p->p_numksegrps++;
1362}
1363
1364/*
1365 * This function is intended to be used to initialize a spare thread
1366 * for upcall. Initialize the thread's large data area outside
1367 * sched_lock for thread_schedule_upcall().
1368 */
1369void
1370thread_alloc_spare(struct thread *td, struct thread *spare)
1371{
1372 if (td->td_standin)
1373 return;
1374 if (spare == NULL)
1375 spare = thread_alloc();
1376 td->td_standin = spare;
1377 bzero(&spare->td_startzero,
1378 (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
1379 spare->td_proc = td->td_proc;
1380 spare->td_ucred = crhold(td->td_ucred);
1381}
1382
1383/*
1384 * Create a thread and schedule it for upcall on the KSE given.
1385 * Use our thread's standin so that we don't have to allocate one.
1386 */
1387struct thread *
1388thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
1389{
1390 struct thread *td2;
1391
1392 mtx_assert(&sched_lock, MA_OWNED);
1393
1394 /*
1395	 * Schedule an upcall thread on the specified kse_upcall;
1396	 * the kse_upcall must be free and
1397	 * td must have a spare thread.
1398 */
1399 KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
1400 if ((td2 = td->td_standin) != NULL) {
1401 td->td_standin = NULL;
1402 } else {
1403 panic("no reserve thread when scheduling an upcall");
1404 return (NULL);
1405 }
1406 CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
1407 td2, td->td_proc->p_pid, td->td_proc->p_comm);
1408 bcopy(&td->td_startcopy, &td2->td_startcopy,
1409 (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
1410 thread_link(td2, ku->ku_ksegrp);
1411 /* inherit blocked thread's context */
1412 bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
1413 cpu_set_upcall(td2, td->td_pcb);
1414 /* Let the new thread become owner of the upcall */
1415 ku->ku_owner = td2;
1416 td2->td_upcall = ku;
1417 td2->td_flags = TDF_UPCALLING;
1418#if 0 /* XXX This shouldn't be necessary */
1419 if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
1420 td2->td_flags |= TDF_ASTPENDING;
1421#endif
1422 td2->td_kse = NULL;
1423 td2->td_state = TDS_CAN_RUN;
1424 td2->td_inhibitors = 0;
1425 setrunqueue(td2);
1426 return (td2); /* bogus.. should be a void function */
1427}
1428
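/*
 * Add a signal to the set of caught signals in the upcall mailbox.
 * On a copyin/copyout failure the thread is killed with SIGILL.
 */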
1429void
1430thread_signal_add(struct thread *td, int sig)
1431{
1432 struct kse_upcall *ku;
1433 struct proc *p;
1434 sigset_t ss;
1435 int error;
1436
1437 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
1438 td = curthread;
1439 ku = td->td_upcall;
1440 p = td->td_proc;
1441
1442 PROC_UNLOCK(p);
1443 error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
1444 if (error)
1445 goto error;
1446
1447 SIGADDSET(ss, sig);
1448
1449 error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
1450 if (error)
1451 goto error;
1452
1453 PROC_LOCK(p);
1454 return;
1455error:
1456 PROC_LOCK(p);
1457 sigexit(td, SIGILL);
1458}
1459
1460
1461/*
1462 * Schedule an upcall to notify a KSE process that it received signals.
1464 */
1465void
1466thread_signal_upcall(struct thread *td)
1467{
1468 mtx_lock_spin(&sched_lock);
1469 td->td_flags |= TDF_UPCALLING;
1470 mtx_unlock_spin(&sched_lock);
1471
1472 return;
1473}
1474
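/*
 * Called when the current thread is being switched out; may hand
 * the thread's upcall off to a newly scheduled upcall thread.
 */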
1475void
1476thread_switchout(struct thread *td)
1477{
1478 struct kse_upcall *ku;
1479
1480 mtx_assert(&sched_lock, MA_OWNED);
1481
1482 /*
1483	 * If the outgoing thread is in a threaded group and has never
1484 * scheduled an upcall, decide whether this is a short
1485 * or long term event and thus whether or not to schedule
1486 * an upcall.
1487 * If it is a short term event, just suspend it in
1488 * a way that takes its KSE with it.
1489 * Select the events for which we want to schedule upcalls.
1490 * For now it's just sleep.
1491 * XXXKSE eventually almost any inhibition could do.
1492 */
1493 if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1494 /*
1495		 * Release ownership of the upcall and schedule an upcall
1496		 * thread; this new upcall thread becomes the owner of
1497		 * the upcall structure.
1498 */
1499 ku = td->td_upcall;
1500 ku->ku_owner = NULL;
1501 td->td_upcall = NULL;
1502 td->td_flags &= ~TDF_CAN_UNBIND;
1503 thread_schedule_upcall(td, ku);
1504 }
1505}
1506
1507/*
1508 * Setup done on the thread when it enters the kernel.
1509 * XXXKSE Presently only for syscalls but eventually all kernel entries.
1510 */
1511void
1512thread_user_enter(struct proc *p, struct thread *td)
1513{
1514 struct ksegrp *kg;
1515 struct kse_upcall *ku;
1516 struct kse_thr_mailbox *tmbx;
1517
1518 kg = td->td_ksegrp;
1519
1520 /*
1521	 * First check that we shouldn't just abort,
1522	 * but make sure we are not the single thread first!
1523 */
1524 PROC_LOCK(p);
1525 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1526 mtx_lock_spin(&sched_lock);
1527 thread_stopped(p);
1528 thread_exit();
1529 /* NOTREACHED */
1530 }
1531 PROC_UNLOCK(p);
1532
1533 /*
1534 * If we are doing a syscall in a KSE environment,
1535 * note where our mailbox is. There is always the
1536 * possibility that we could do this lazily (in kse_reassign()),
1537 * but for now do it every time.
1538 */
1539 kg = td->td_ksegrp;
1540 if (kg->kg_numupcalls) {
1541 ku = td->td_upcall;
1542 KASSERT(ku, ("%s: no upcall owned", __func__));
1543 KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1544 KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__));
1545 ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags);
1546 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1547 if ((tmbx == NULL) || (tmbx == (void *)-1)) {
1548 td->td_mailbox = NULL;
1549 } else {
1550 td->td_mailbox = tmbx;
1551 if (td->td_standin == NULL)
1552 thread_alloc_spare(td, NULL);
1553 mtx_lock_spin(&sched_lock);
1554 if (ku->ku_mflags & KMF_NOUPCALL)
1555 td->td_flags &= ~TDF_CAN_UNBIND;
1556 else
1557 td->td_flags |= TDF_CAN_UNBIND;
1558 mtx_unlock_spin(&sched_lock);
1559 }
1560 }
1561}
1562
1563/*
1564 * The extra work we go through if we are a threaded process when we
1565 * return to userland.
1566 *
1567 * If we are a KSE process and returning to user mode, check for
1568 * extra work to do before we return (e.g. for more syscalls
1569 * to complete first). If we were in a critical section, we should
1570 * just return to let it finish. Same if we were in the UTS (in
1571 * which case the mailbox's context's busy indicator will be set).
1572 * The only traps we support will have set the mailbox.
1573 * We will clear it here.
1574 */
1575int
1576thread_userret(struct thread *td, struct trapframe *frame)
1577{
1578 int error = 0, upcalls, uts_crit;
1579 struct kse_upcall *ku;
1580 struct ksegrp *kg, *kg2;
1581 struct proc *p;
1582 struct timespec ts;
1583
1584 p = td->td_proc;
1585 kg = td->td_ksegrp;
1586
1587 /* Nothing to do with non-threaded group/process */
1588 if (td->td_ksegrp->kg_numupcalls == 0)
1589 return (0);
1590
1591 /*
1592	 * A stat clock interrupt hit in userland and we are
1593	 * returning from the interrupt, so charge the thread's
1594	 * userland time for the UTS.
1595 */
1596 if (td->td_flags & TDF_USTATCLOCK) {
1597 thread_update_usr_ticks(td, 1);
1598 mtx_lock_spin(&sched_lock);
1599 td->td_flags &= ~TDF_USTATCLOCK;
1600 mtx_unlock_spin(&sched_lock);
1601 if (kg->kg_completed ||
1602 (td->td_upcall->ku_flags & KUF_DOUPCALL))
1603 thread_user_enter(p, td);
1604 }
1605
1606 uts_crit = (td->td_mailbox == NULL);
1607 ku = td->td_upcall;
1608 /*
1609 * Optimisation:
1610 * This thread has not started any upcall.
1611	 * If there is no work to report other than ourselves,
1612	 * then it can return directly to userland.
1613 */
1614 if (TD_CAN_UNBIND(td)) {
1615 mtx_lock_spin(&sched_lock);
1616 td->td_flags &= ~TDF_CAN_UNBIND;
1617 if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1618 (kg->kg_completed == NULL) &&
1619 (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1620 (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1621 mtx_unlock_spin(&sched_lock);
1622 thread_update_usr_ticks(td, 0);
1623 nanotime(&ts);
1624 error = copyout(&ts,
1625 (caddr_t)&ku->ku_mailbox->km_timeofday,
1626 sizeof(ts));
1627			td->td_mailbox = NULL;
1628 ku->ku_mflags = 0;
1629 if (error)
1630 goto out;
1631 return (0);
1632 }
1633 mtx_unlock_spin(&sched_lock);
1634 error = thread_export_context(td);
1635 if (error) {
1636 /*
1637 * Failing to do the KSE operation just defaults
1638			 * back to synchronous operation, so just return from
1639 * the syscall.
1640 */
1641 goto out;
1642 }
1643 /*
1644		 * There is something to report, and we own an upcall
1645		 * structure, so we can go to userland.
1646		 * Turn ourselves into an upcall thread.
1647 */
1648 mtx_lock_spin(&sched_lock);
1649 td->td_flags |= TDF_UPCALLING;
1650 mtx_unlock_spin(&sched_lock);
1651 } else if (td->td_mailbox && (ku == NULL)) {
1652 error = thread_export_context(td);
1653 /* possibly upcall with error? */
1654 PROC_LOCK(p);
1655 /*
1656 * There are upcall threads waiting for
1657 * work to do, wake one of them up.
1658 * XXXKSE Maybe wake all of them up.
1659 */
1660 if (!error && kg->kg_upsleeps)
1661 wakeup_one(&kg->kg_completed);
1662 mtx_lock_spin(&sched_lock);
1663 thread_stopped(p);
1664 thread_exit();
1665 /* NOTREACHED */
1666 }
1667
1668 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1669
1670 if (p->p_numthreads > max_threads_per_proc) {
1671 max_threads_hits++;
1672 PROC_LOCK(p);
1673 mtx_lock_spin(&sched_lock);
1674 while (p->p_numthreads > max_threads_per_proc) {
1675 if (P_SHOULDSTOP(p))
1676 break;
1677 upcalls = 0;
1678 FOREACH_KSEGRP_IN_PROC(p, kg2) {
1679 if (kg2->kg_numupcalls == 0)
1680 upcalls++;
1681 else
1682 upcalls += kg2->kg_numupcalls;
1683 }
1684 if (upcalls >= max_threads_per_proc)
1685 break;
1686 mtx_unlock_spin(&sched_lock);
1687 p->p_maxthrwaits++;
1688 msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1689 "maxthreads", NULL);
1690 p->p_maxthrwaits--;
1691 mtx_lock_spin(&sched_lock);
1692 }
1693 mtx_unlock_spin(&sched_lock);
1694 PROC_UNLOCK(p);
1695 }
1696
1697 if (td->td_flags & TDF_UPCALLING) {
1698 uts_crit = 0;
1699 kg->kg_nextupcall = ticks+kg->kg_upquantum;
1700 /*
1701 * There is no more work to do and we are going to ride
1702 * this thread up to userland as an upcall.
1703 * Do the last parts of the setup needed for the upcall.
1704 */
1705 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1706 td, td->td_proc->p_pid, td->td_proc->p_comm);
1707
1708 mtx_lock_spin(&sched_lock);
1709 td->td_flags &= ~TDF_UPCALLING;
1710 if (ku->ku_flags & KUF_DOUPCALL)
1711 ku->ku_flags &= ~KUF_DOUPCALL;
1712 mtx_unlock_spin(&sched_lock);
1713
1714 /*
1715 * Set user context to the UTS
1716 */
1717 if (!(ku->ku_mflags & KMF_NOUPCALL)) {
1718 cpu_set_upcall_kse(td, ku);
1719 error = suword(&ku->ku_mailbox->km_curthread, 0);
1720 if (error)
1721 goto out;
1722 }
1723
1724 /*
1725 * Unhook the list of completed threads.
1726		 * Anything that completes after this gets to
1727 * come in next time.
1728 * Put the list of completed thread mailboxes on
1729 * this KSE's mailbox.
1730 */
1731 if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
1732 (error = thread_link_mboxes(kg, ku)) != 0)
1733 goto out;
1734 }
1735 if (!uts_crit) {
1736 nanotime(&ts);
1737 error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
1738 }
1739
1740out:
1741 if (error) {
1742 /*
1743		 * Things are going to be so screwed up that we should
1744		 * just kill the process.
1745		 * How do we do that?
1746 */
1747 PROC_LOCK(td->td_proc);
1748 psignal(td->td_proc, SIGSEGV);
1749 PROC_UNLOCK(td->td_proc);
1750 } else {
1751 /*
1752 * Optimisation:
1753 * Ensure that we have a spare thread available,
1754 * for when we re-enter the kernel.
1755 */
1756 if (td->td_standin == NULL)
1757 thread_alloc_spare(td, NULL);
1758 }
1759
1760 ku->ku_mflags = 0;
1761 /*
1762 * Clear thread mailbox first, then clear system tick count.
1763	 * The order is important because thread_statclock() uses
1764	 * the mailbox pointer to see if it is a userland thread or
1765	 * a UTS kernel thread.
1766 */
1767 td->td_mailbox = NULL;
1768 td->td_usticks = 0;
1769 return (error); /* go sync */
1770}
1771
1772/*
1773 * Enforce single-threading.
1774 *
1775 * Returns 1 if the caller must abort (another thread is waiting to
1776 * exit the process or similar). Process is locked!
1777 * Returns 0 when you are successfully the only thread running.
1778 * A process has successfully single threaded in suspend mode when
1779 * there are no threads in user mode. Threads in the kernel must be
1780 * allowed to continue until they get to the user boundary. They may even
1781 * copy out their return values and data before suspending. They may,
1782 * however, be accelerated in reaching the user boundary, as we will
1783 * wake up any sleeping threads that are interruptible (PCATCH).
1784 */
1785int
1786thread_single(int force_exit)
1787{
1788 struct thread *td;
1789 struct thread *td2;
1790 struct proc *p;
1791
1792 td = curthread;
1793 p = td->td_proc;
1794 mtx_assert(&Giant, MA_OWNED);
1795 PROC_LOCK_ASSERT(p, MA_OWNED);
1796 KASSERT((td != NULL), ("curthread is NULL"));
1797
1798 if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
1799 return (0);
1800
1801 /* Is someone already single threading? */
1802 if (p->p_singlethread)
1803 return (1);
1804
1805 if (force_exit == SINGLE_EXIT) {
1806 p->p_flag |= P_SINGLE_EXIT;
1807 } else
1808 p->p_flag &= ~P_SINGLE_EXIT;
1809 p->p_flag |= P_STOPPED_SINGLE;
1810 mtx_lock_spin(&sched_lock);
1811 p->p_singlethread = td;
1812 while ((p->p_numthreads - p->p_suspcount) != 1) {
1813 FOREACH_THREAD_IN_PROC(p, td2) {
1814 if (td2 == td)
1815 continue;
1816 td2->td_flags |= TDF_ASTPENDING;
1817 if (TD_IS_INHIBITED(td2)) {
1818 if (force_exit == SINGLE_EXIT) {
1819 if (TD_IS_SUSPENDED(td2)) {
1820 thread_unsuspend_one(td2);
1821 }
1822 if (TD_ON_SLEEPQ(td2) &&
1823 (td2->td_flags & TDF_SINTR)) {
1824 if (td2->td_flags & TDF_CVWAITQ)
1825 cv_abort(td2);
1826 else
1827 abortsleep(td2);
1828 }
1829 } else {
1830 if (TD_IS_SUSPENDED(td2))
1831 continue;
1832 /*
1833					 * Maybe other inhibited states too?
1834					 * XXXKSE Is it totally safe to
1835					 * suspend a non-interruptible thread?
1836 */
1837 if (td2->td_inhibitors &
1838 (TDI_SLEEPING | TDI_SWAPPED))
1839 thread_suspend_one(td2);
1840 }
1841 }
1842 }
1843 /*
1844 * Maybe we suspended some threads.. was it enough?
1845 */
1846 if ((p->p_numthreads - p->p_suspcount) == 1)
1847 break;
1848
1849 /*
1850 * Wake us up when everyone else has suspended.
1851		 * In the meantime we suspend as well.
1852 */
1853 thread_suspend_one(td);
1854 DROP_GIANT();
1855 PROC_UNLOCK(p);
1856 p->p_stats->p_ru.ru_nvcsw++;
1857 mi_switch();
1858 mtx_unlock_spin(&sched_lock);
1859 PICKUP_GIANT();
1860 PROC_LOCK(p);
1861 mtx_lock_spin(&sched_lock);
1862 }
1863 if (force_exit == SINGLE_EXIT) {
1864 if (td->td_upcall)
1865 upcall_remove(td);
1866 kse_purge(p, td);
1867 }
1868 mtx_unlock_spin(&sched_lock);
1869 return (0);
1870}
1871
1872/*
1873 * Called in from locations that can safely check to see
1874 * whether we have to suspend or at least throttle for a
1875 * single-thread event (e.g. fork).
1876 *
1877 * Such locations include userret().
1878 * If the "return_instead" argument is non zero, the thread must be able to
1879 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1880 *
1881 * The 'return_instead' argument tells the function if it may do a
1882 * thread_exit() or suspend, or whether the caller must abort and back
1883 * out instead.
1884 *
1885 * If the thread that set the single_threading request has set the
1886 * P_SINGLE_EXIT bit in the process flags then this call will never return
1887 * if 'return_instead' is false, but will exit.
1888 *
1889 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1890 *---------------+--------------------+---------------------
1891 * 0 | returns 0 | returns 0 or 1
1892 *       | when ST ends     |   immediately
1893 *---------------+--------------------+---------------------
1894 * 1 | thread exits | returns 1
1895 *       |                  |   immediately
1896 * 0 = thread_exit() or suspension ok,
1897 * other = return error instead of stopping the thread.
1898 *
1899 * While a full suspension is under effect, even a single threading
1900 * thread would be suspended if it made this call (but it shouldn't).
1901 * This call should only be made from places where
1902 * thread_exit() would be safe as that may be the outcome unless
1903 * return_instead is set.
1904 */
1905int
1906thread_suspend_check(int return_instead)
1907{
1908 struct thread *td;
1909 struct proc *p;
1910 struct ksegrp *kg;
1911
1912 td = curthread;
1913 p = td->td_proc;
1914 kg = td->td_ksegrp;
1915 PROC_LOCK_ASSERT(p, MA_OWNED);
1916 while (P_SHOULDSTOP(p)) {
1917 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1918 KASSERT(p->p_singlethread != NULL,
1919 ("singlethread not set"));
1920 /*
1921 * The only suspension in action is a
1922			 * single-threading. The single threader need not stop.
1923 * XXX Should be safe to access unlocked
1924 * as it can only be set to be true by us.
1925 */
1926 if (p->p_singlethread == td)
1927 return (0); /* Exempt from stopping. */
1928 }
1929 if (return_instead)
1930 return (1);
1931
1932 mtx_lock_spin(&sched_lock);
1933 thread_stopped(p);
1934 /*
1935 * If the process is waiting for us to exit,
1936		 * this thread should just commit suicide.
1937 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1938 */
1939 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1940 while (mtx_owned(&Giant))
1941 mtx_unlock(&Giant);
1942 if (p->p_flag & P_THREADED)
1943 thread_exit();
1944 else
1945 thr_exit1();
1946 }
1947
1948 /*
1949 * When a thread suspends, it just
1950		 * moves to the process's suspend queue
1951 * and stays there.
1952 */
1953 thread_suspend_one(td);
1954 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1955 if (p->p_numthreads == p->p_suspcount) {
1956 thread_unsuspend_one(p->p_singlethread);
1957 }
1958 }
1959 DROP_GIANT();
1960 PROC_UNLOCK(p);
1961 p->p_stats->p_ru.ru_nivcsw++;
1962 mi_switch();
1963 mtx_unlock_spin(&sched_lock);
1964 PICKUP_GIANT();
1965 PROC_LOCK(p);
1966 }
1967 return (0);
1968}
1969
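/*
 * Move a thread to the process's suspend queue.
 * Called with sched_lock and the proc lock held.
 */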
1970void
1971thread_suspend_one(struct thread *td)
1972{
1973 struct proc *p = td->td_proc;
1974
1975 mtx_assert(&sched_lock, MA_OWNED);
1976 PROC_LOCK_ASSERT(p, MA_OWNED);
1977 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1978 p->p_suspcount++;
1979 TD_SET_SUSPENDED(td);
1980 TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
1981 /*
1982 * Hack: If we are suspending but are on the sleep queue
1983 * then we are in msleep or the cv equivalent. We
1984	 * want to look like we have two inhibitors.
1985 * May already be set.. doesn't matter.
1986 */
1987 if (TD_ON_SLEEPQ(td))
1988 TD_SET_SLEEPING(td);
1989}
1990
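/*
 * Remove a thread from the suspend queue and make it runnable again.
 * Called with sched_lock and the proc lock held.
 */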
1991void
1992thread_unsuspend_one(struct thread *td)
1993{
1994 struct proc *p = td->td_proc;
1995
1996 mtx_assert(&sched_lock, MA_OWNED);
1997 PROC_LOCK_ASSERT(p, MA_OWNED);
1998 TAILQ_REMOVE(&p->p_suspended, td, td_runq);
1999 TD_CLR_SUSPENDED(td);
2000 p->p_suspcount--;
2001 setrunnable(td);
2002}
2003
2004/*
2005 * Allow all threads blocked by single threading to continue running.
2006 */
2007void
2008thread_unsuspend(struct proc *p)
2009{
2010 struct thread *td;
2011
2012 mtx_assert(&sched_lock, MA_OWNED);
2013 PROC_LOCK_ASSERT(p, MA_OWNED);
2014 if (!P_SHOULDSTOP(p)) {
2015		while ((td = TAILQ_FIRST(&p->p_suspended)) != NULL) {
2016 thread_unsuspend_one(td);
2017 }
2018 } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
2019 (p->p_numthreads == p->p_suspcount)) {
2020 /*
2021 * Stopping everything also did the job for the single
2022 * threading request. Now we've downgraded to single-threaded,
2023 * let it continue.
2024 */
2025 thread_unsuspend_one(p->p_singlethread);
2026 }
2027}
2028
2029void
2030thread_single_end(void)
2031{
2032 struct thread *td;
2033 struct proc *p;
2034
2035 td = curthread;
2036 p = td->td_proc;
2037 PROC_LOCK_ASSERT(p, MA_OWNED);
2038 p->p_flag &= ~P_STOPPED_SINGLE;
2039 mtx_lock_spin(&sched_lock);
2040 p->p_singlethread = NULL;
2041 /*
2042	 * If there are other threads they may now run,
2043	 * unless of course there is a blanket 'stop order'
2044	 * on the process. The single threader must be allowed
2045	 * to continue, however, as this is a bad place to stop.
2046 */
2047 if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
2048		while ((td = TAILQ_FIRST(&p->p_suspended)) != NULL) {
2049 thread_unsuspend_one(td);
2050 }
2051 }
2052 mtx_unlock_spin(&sched_lock);
2053}
2054
2055