/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_switch.c 112397 2003-03-19 05:49:38Z davidxu $
 */

/***

Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become running, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.

*/
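
/*
 * A minimal sketch (not compiled into the kernel; hypothetical helper
 * name) of the invariant described above: walking kg_runq in order,
 * every thread up to and including kg_last_assigned holds a KSE, and
 * every thread after it holds none.  thread_sanity_check() at the
 * bottom of this file checks a superset of this.
 */
#if 0
static void
kg_invariant_sketch(struct ksegrp *kg)
{
	struct thread *td;
	int past_last = (kg->kg_last_assigned == NULL);

	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		/* Assigned iff we have not yet passed last_assigned. */
		KASSERT((td->td_kse != NULL) == !past_last,
		    ("KSE assignment disagrees with kg_last_assigned"));
		if (td == kg->kg_last_assigned)
			past_last = 1;
	}
}
#endif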

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
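/*
 * For example, assuming the stock layout of RQ_NQS == 64 run queues and
 * 32-bit status words (RQB_BPW == 32), RQB_LEN must be 2: two words
 * exactly cover the 64 queue indices.
 */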

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = sched_choose())) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_THREADED) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * Only allow non-system threads to run in a panic
	 * if they are the one we are tracing.  (I think.. [JRE])
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;

	TD_SET_RUNNING(td);
	return (td);
}
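
/*
 * Note that the kg_last_assigned adjustment in choosethread() is the
 * "M was 1 and is now 0" case from the header comment: the chosen
 * thread was at the head of kg_runq, so if it was also the last
 * assigned one, no queued thread behind it holds a KSE and the pointer
 * backs up to NULL (TAILQ_PREV of the queue head).
 */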

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread.
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}
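
/*
 * This is the "thread sleeps or is removed" case from the header
 * comment: the vacated KSE either picks up the highest-priority
 * unassigned thread (advancing kg_last_assigned to it) or parks on the
 * ksegrp's idle list until setrunqueue() hands it out again.
 */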

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/* Bring its kse with it, leave the thread attached. */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(ke);
			sched_add(ke);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(ke);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}
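
/*
 * Note that for a threaded process adjustrunqueue() simply dequeues the
 * thread and calls setrunqueue() again; re-running the insertion logic
 * below is what keeps the kg_last_assigned invariant intact across a
 * priority change.
 */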

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking..
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}
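
/*
 * A worked example of the three cases above (illustrative): suppose
 * last_assigned pointed at thread ta when we arrived holding a KSE for
 * the newly queued thread td.  With no last_assigned at all, the head
 * of the queue (possibly td) takes the KSE.  If td's priority beats
 * ta's, td was inserted ahead of ta and keeps the KSE itself;
 * last_assigned already lies past it.  Otherwise td landed somewhere
 * after ta, so the KSE goes to the thread directly following ta, i.e.
 * the highest-priority unassigned thread (which may or may not be td),
 * and last_assigned advances to that thread.
 */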

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}
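
/*
 * Usage sketch (illustrative): critical sections nest, and the
 * cpu-level hooks run only at the outermost transitions.
 *
 *	critical_enter();	td_critnest 0 -> 1, cpu_critical_enter()
 *	critical_enter();	td_critnest 1 -> 2
 *	critical_exit();	td_critnest 2 -> 1
 *	critical_exit();	td_critnest 1 -> 0, cpu_critical_exit()
 */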

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
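
/*
 * Example of the status-word encoding (assuming 32-bit words, i.e.
 * RQB_L2BPW == 5): queue index 37 lives in word 37 >> 5 == 1 at bit
 * 37 & 31 == 5, so RQB_WORD(37) == 1 and RQB_BIT(37) == 0x20.  Going
 * the other way, finding bit 5 set in word 1 yields
 * pri == 5 + (1 << 5) == 37.
 */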

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
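
/*
 * Example of the priority-to-queue mapping (assuming the stock RQ_PPQ
 * of 4): thread priorities 0..3 share queue index 0, priorities 4..7
 * share index 1, and so on; each queue holds a band of RQ_PPQ adjacent
 * priorities, with FIFO order breaking ties within a band.
 */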

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority KSE on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

#if 0
void
panc(char *string1, char *string2)
{
	printf("%s", string1);
	Debugger(string2);
}

void
thread_sanity_check(struct thread *td, char *string)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td2 = NULL;
	unsigned int prevpri;
	int saw_lastassigned = 0;
	int unassigned = 0;
	int assigned = 0;

	p = td->td_proc;
	kg = td->td_ksegrp;
	ke = td->td_kse;

	if (ke) {
		if (p != ke->ke_proc) {
			panc(string, "wrong proc");
		}
		if (ke->ke_thread != td) {
			panc(string, "wrong thread");
		}
	}

	if ((p->p_flag & P_THREADED) == 0) {
		if (ke == NULL) {
			panc(string, "non KSE thread lost kse");
		}
	} else {
		prevpri = 0;
		saw_lastassigned = 0;
		unassigned = 0;
		assigned = 0;
		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
			if (td2->td_priority < prevpri) {
				panc(string, "thread runqueue unsorted");
			}
			if ((td2->td_state == TDS_RUNQ) &&
			    td2->td_kse &&
			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
				panc(string, "KSE wrong state");
			}
			prevpri = td2->td_priority;
			if (td2->td_kse) {
				assigned++;
				if (unassigned) {
					panc(string, "unassigned before assigned");
				}
				if (kg->kg_last_assigned == NULL) {
					panc(string, "lastassigned corrupt");
				}
				if (saw_lastassigned) {
					panc(string, "last assigned not last");
				}
				if (td2->td_kse->ke_thread != td2) {
					panc(string, "mismatched kse/thread");
				}
			} else {
				unassigned++;
			}
			if (td2 == kg->kg_last_assigned) {
				saw_lastassigned = 1;
				if (td2->td_kse == NULL) {
					panc(string, "last assigned not assigned");
				}
			}
		}
		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
			panc(string, "where on earth does lastassigned point?");
		}
#if 0
		FOREACH_THREAD_IN_GROUP(kg, td2) {
			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
			    (TD_ON_RUNQ(td2))) {
				assigned++;
				if (td2->td_kse == NULL) {
					panc(string, "BOUND thread with no KSE");
				}
			}
		}
#endif
#if 0
		if ((unassigned + assigned) != kg->kg_runnable) {
			panc(string, "wrong number in runnable");
		}
#endif
	}
	if (assigned == 12345) {
		printf("%p %p %p %p %p %d, %d",
		    td, td2, ke, kg, p, assigned, saw_lastassigned);
	}
}
#endif