/*-
 * Copyright (c) 2015 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Historically based on the Xen Mini-OS scheduler by Grzegorz Milos,
 * rewritten to deal with multiple infrequently running threads in the
 * current reincarnation.
 */

#include <bmk-core/core.h>
#include <bmk-core/errno.h>
#include <bmk-core/memalloc.h>
#include <bmk-core/platform.h>
#include <bmk-core/pgalloc.h>
#include <bmk-core/printf.h>
#include <bmk-core/queue.h>
#include <bmk-core/string.h>
#include <bmk-core/sched.h>

void *bmk_mainstackbase;
unsigned long bmk_mainstacksize;

/*
 * how long to sleep if there is absolutely nothing to do
 * (default 1s)
 */
#define BLOCKTIME_MAX (1*1000*1000*1000)

#define NAME_MAXLEN 16

/* flags and their meanings + invariants */
#define THR_RUNQ	0x0001		/* on runq, can be run		*/
#define THR_TIMEQ	0x0002		/* on timeq, blocked w/ timeout	*/
#define THR_BLOCKQ	0x0004		/* on blockq, indefinite block	*/
#define THR_QMASK	0x0007
#define THR_RUNNING	0x0008		/* no queue, thread == current	*/

#define THR_TIMEDOUT	0x0010
#define THR_MUSTJOIN	0x0020
#define THR_JOINED	0x0040

#define THR_EXTSTACK	0x0100
#define THR_DEAD	0x0200
#define THR_BLOCKPREP	0x0400

#if !(defined(__i386__) || defined(__x86_64__))
#define _TLS_I
#else
#define _TLS_II
#endif

extern const char _rump_tdata_start[], _rump_tdata_end[];
extern const char _rump_tbss_start[], _rump_tbss_end[];
#define TDATASIZE (_rump_tdata_end - _rump_tdata_start)
#define TBSSSIZE (_rump_tbss_end - _rump_tbss_start)
#define TMEMSIZE \
    (((TDATASIZE + TBSSSIZE + sizeof(void *)-1)/sizeof(void *))*sizeof(void *))
#ifdef _TLS_I
#define TCBOFFSET 0
#else
#define TCBOFFSET TMEMSIZE
#endif
#define TLSAREASIZE (TMEMSIZE + BMK_TLS_EXTRA)
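
/*
 * A note on the layout (a sketch based on the standard ELF TLS variants;
 * see bmk_sched_tls_alloc() and inittcb() below): with variant I the
 * thread pointer refers to the start of the area, so the TCB lives at
 * offset 0 and the thread-local data follows it.  With variant II
 * (i386/amd64) the thread pointer sits above the data and points to
 * itself, hence TCBOFFSET == TMEMSIZE.
 */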

struct bmk_thread {
	char bt_name[NAME_MAXLEN];

	bmk_time_t bt_wakeup_time;

	int bt_flags;
	int bt_errno;

	void *bt_stackbase;

	void *bt_cookie;

	/* MD thread control block */
	struct bmk_tcb bt_tcb;

	TAILQ_ENTRY(bmk_thread) bt_schedq;
	TAILQ_ENTRY(bmk_thread) bt_threadq;
};
__thread struct bmk_thread *bmk_current;

TAILQ_HEAD(threadqueue, bmk_thread);
static struct threadqueue threadq = TAILQ_HEAD_INITIALIZER(threadq);
static struct threadqueue zombieq = TAILQ_HEAD_INITIALIZER(zombieq);

/*
 * We have 3 different queues for theoretically runnable threads:
 * 1) runnable threads waiting to be scheduled
 * 2) threads waiting for a timeout to expire (or to be woken up)
 * 3) threads waiting indefinitely for a wakeup
 *
 * Rules: while running, threads are on no schedq.  Threads can block
 *        only themselves (though that needs revisiting for "suspend").
 *        When blocked, threads move to either blockq or timeq.
 *        When a thread is woken up (either explicitly or because its
 *        timeout expires), it moves to the runnable queue.  Wakeups
 *        while a thread is already on the runnable queue or while
 *        running (via an interrupt handler) have no effect.
 */
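/*
 * Roughly, a thread cycles through the queues as follows (a sketch
 * derived from the routines below):
 *
 *	THR_RUNNING --bmk_sched_blockprepare[_timeout]()--> timeq/blockq
 *	timeq/blockq --bmk_sched_wake() or timeout expiry--> runq
 *	runq --schedule()--> THR_RUNNING
 */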
static struct threadqueue runq = TAILQ_HEAD_INITIALIZER(runq);
static struct threadqueue blockq = TAILQ_HEAD_INITIALIZER(blockq);
static struct threadqueue timeq = TAILQ_HEAD_INITIALIZER(timeq);

static void (*scheduler_hook)(void *, void *);

static void
print_threadinfo(struct bmk_thread *thread)
{

	bmk_printf("thread \"%s\" at %p, flags 0x%x\n",
	    thread->bt_name, thread, thread->bt_flags);
}

static inline void
setflags(struct bmk_thread *thread, int add, int remove)
{

	thread->bt_flags &= ~remove;
	thread->bt_flags |= add;
}

static void
set_runnable(struct bmk_thread *thread)
{
	struct threadqueue *tq;
	int tflags;
	int flags;

	tflags = thread->bt_flags;
	/*
	 * Already runnable?  Nothing to do, then.
	 */
	if ((tflags & THR_RUNQ) == THR_RUNQ)
		return;

	/* get current queue */
	switch (tflags & THR_QMASK) {
	case THR_TIMEQ:
		tq = &timeq;
		break;
	case THR_BLOCKQ:
		tq = &blockq;
		break;
	default:
		/*
		 * Are we running and not blocked?  Might be that we were
		 * called from an interrupt handler.  Can just ignore
		 * this whole thing.
		 */
		if ((tflags & (THR_RUNNING|THR_QMASK)) == THR_RUNNING)
			return;

		print_threadinfo(thread);
		bmk_platform_halt("invalid thread queue");
	}

	/*
	 * Else, the target was blocked and we need to make it runnable
	 */
	flags = bmk_platform_splhigh();
	TAILQ_REMOVE(tq, thread, bt_schedq);
	setflags(thread, THR_RUNQ, THR_QMASK);
	TAILQ_INSERT_TAIL(&runq, thread, bt_schedq);
	bmk_platform_splx(flags);
}

/*
 * Insert thread into timeq at the correct place.
 */
static void
timeq_sorted_insert(struct bmk_thread *thread)
{
	struct bmk_thread *iter;

	bmk_assert(thread->bt_wakeup_time != BMK_SCHED_BLOCK_INFTIME);

	/* case1: no others */
	if (TAILQ_EMPTY(&timeq)) {
		TAILQ_INSERT_HEAD(&timeq, thread, bt_schedq);
		return;
	}

	/* case2: not last in queue */
	TAILQ_FOREACH(iter, &timeq, bt_schedq) {
		if (iter->bt_wakeup_time > thread->bt_wakeup_time) {
			TAILQ_INSERT_BEFORE(iter, thread, bt_schedq);
			return;
		}
	}

	/* case3: last in queue with greatest current timeout */
	bmk_assert(TAILQ_LAST(&timeq, threadqueue)->bt_wakeup_time
	    <= thread->bt_wakeup_time);
	TAILQ_INSERT_TAIL(&timeq, thread, bt_schedq);
}

/*
 * Called with interrupts disabled
 */
static void
clear_runnable(void)
{
	struct bmk_thread *thread = bmk_current;
	int newfl;

	bmk_assert(thread->bt_flags & THR_RUNNING);

	/*
	 * Currently we require that a thread will block only
	 * once before calling the scheduler.
	 */
	bmk_assert((thread->bt_flags & THR_RUNQ) == 0);

	newfl = thread->bt_flags;
	if (thread->bt_wakeup_time != BMK_SCHED_BLOCK_INFTIME) {
		newfl |= THR_TIMEQ;
		timeq_sorted_insert(thread);
	} else {
		newfl |= THR_BLOCKQ;
		TAILQ_INSERT_TAIL(&blockq, thread, bt_schedq);
	}
	thread->bt_flags = newfl;
}

static void
stackalloc(void **stack, unsigned long *ss)
{

	*stack = bmk_pgalloc(bmk_stackpageorder);
	*ss = bmk_stacksize;
}

static void
stackfree(struct bmk_thread *thread)
{

	bmk_pgfree(thread->bt_stackbase, bmk_stackpageorder);
}

void
bmk_sched_dumpqueue(void)
{
	struct bmk_thread *thr;

	bmk_printf("BEGIN runq dump\n");
	TAILQ_FOREACH(thr, &runq, bt_schedq) {
		print_threadinfo(thr);
	}
	bmk_printf("END runq dump\n");

	bmk_printf("BEGIN timeq dump\n");
	TAILQ_FOREACH(thr, &timeq, bt_schedq) {
		print_threadinfo(thr);
	}
	bmk_printf("END timeq dump\n");

	bmk_printf("BEGIN blockq dump\n");
	TAILQ_FOREACH(thr, &blockq, bt_schedq) {
		print_threadinfo(thr);
	}
	bmk_printf("END blockq dump\n");
}

static void
sched_switch(struct bmk_thread *prev, struct bmk_thread *next)
{

	bmk_assert(next->bt_flags & THR_RUNNING);
	bmk_assert((next->bt_flags & THR_QMASK) == 0);

	if (scheduler_hook)
		scheduler_hook(prev->bt_cookie, next->bt_cookie);
	bmk_platform_cpu_sched_settls(&next->bt_tcb);
	bmk_cpu_sched_switch(&prev->bt_tcb, &next->bt_tcb);
}

static void
schedule(void)
{
	struct bmk_thread *prev, *next, *thread;
	unsigned long flags;

	prev = bmk_current;

	flags = bmk_platform_splhigh();
	if (flags) {
		bmk_platform_halt("schedule() called at !spl0");
	}
	for (;;) {
		bmk_time_t curtime, waketime;

		curtime = bmk_platform_cpu_clock_monotonic();
		waketime = curtime + BLOCKTIME_MAX;

		/*
		 * Process timeout queue first by moving threads onto
		 * the runqueue if their timeouts have expired.  Since
		 * the timeouts are sorted, we process until we hit the
		 * first one which will not be woken up.
		 */
		while ((thread = TAILQ_FIRST(&timeq)) != NULL) {
			if (thread->bt_wakeup_time <= curtime) {
				/*
				 * Move the thread to the runqueue.
				 * Threads will run in inverse order of timeout
				 * expiry.  Not sure if that matters or not.
				 */
				thread->bt_flags |= THR_TIMEDOUT;
				bmk_sched_wake(thread);
			} else {
				if (thread->bt_wakeup_time < waketime)
					waketime = thread->bt_wakeup_time;
				break;
			}
		}

		if ((next = TAILQ_FIRST(&runq)) != NULL) {
			bmk_assert(next->bt_flags & THR_RUNQ);
			bmk_assert((next->bt_flags & THR_DEAD) == 0);
			break;
		}

		/*
		 * Nothing to run, block until waketime or until an interrupt
		 * occurs, whichever happens first.  The call will enable
		 * interrupts "atomically" before actually blocking.
		 */
		bmk_platform_cpu_block(waketime);
	}
	/* now we're committed to letting "next" run next */
	setflags(prev, 0, THR_RUNNING);

	TAILQ_REMOVE(&runq, next, bt_schedq);
	setflags(next, THR_RUNNING, THR_RUNQ);
	bmk_platform_splx(flags);

	/*
	 * It is possible that no switch is necessary (prev == next) if:
	 *  + our own timeout expired while we were in here
	 *  + an interrupt handler woke us up before anything else ran
	 */
	if (prev != next) {
		sched_switch(prev, next);
	}

	/*
	 * Reaper.  This always runs in the context of the first "non-virgin"
	 * thread that was scheduled after the current thread decided to exit.
	 */
	while ((thread = TAILQ_FIRST(&zombieq)) != NULL) {
		TAILQ_REMOVE(&zombieq, thread, bt_threadq);
		if ((thread->bt_flags & THR_EXTSTACK) == 0)
			stackfree(thread);
		bmk_memfree(thread, BMK_MEMWHO_WIREDBMK);
	}
}

/*
 * Allocate tls and initialize it.
 * NOTE: does not initialize tcb, see inittcb().
 */
void *
bmk_sched_tls_alloc(void)
{
	char *tlsmem, *p;

	tlsmem = p = bmk_memalloc(TLSAREASIZE, 0, BMK_MEMWHO_WIREDBMK);
#ifdef _TLS_I
	bmk_memset(p, 0, 2*sizeof(void *));
	p += 2 * sizeof(void *);
#endif
	bmk_memcpy(p, _rump_tdata_start, TDATASIZE);
	bmk_memset(p + TDATASIZE, 0, TBSSSIZE);

	return tlsmem + TCBOFFSET;
}

/*
 * Free tls
 */
void
bmk_sched_tls_free(void *mem)
{

	mem = (void *)((unsigned long)mem - TCBOFFSET);
	bmk_memfree(mem, BMK_MEMWHO_WIREDBMK);
}

void *
bmk_sched_gettcb(void)
{

	return (void *)bmk_current->bt_tcb.btcb_tp;
}

static void
inittcb(struct bmk_tcb *tcb, void *tlsarea, unsigned long tlssize)
{

#ifdef _TLS_II
	*(void **)tlsarea = tlsarea;
#endif
	tcb->btcb_tp = (unsigned long)tlsarea;
	tcb->btcb_tpsize = tlssize;
}

static void
initcurrent(void *tcb, struct bmk_thread *value)
{
	bmk_platform_cpu_sched_initcurrent(tcb, value);
}

struct bmk_thread *
bmk_sched_create_withtls(const char *name, void *cookie, int joinable,
	void (*f)(void *), void *data,
	void *stack_base, unsigned long stack_size, void *tlsarea)
{
	struct bmk_thread *thread;
	unsigned long flags;

	thread = bmk_xmalloc_bmk(sizeof(*thread));
	bmk_memset(thread, 0, sizeof(*thread));
	bmk_strncpy(thread->bt_name, name, sizeof(thread->bt_name)-1);

	if (!stack_base) {
		bmk_assert(stack_size == 0);
		stackalloc(&stack_base, &stack_size);
	} else {
		thread->bt_flags = THR_EXTSTACK;
	}
	thread->bt_stackbase = stack_base;
	if (joinable)
		thread->bt_flags |= THR_MUSTJOIN;

	bmk_cpu_sched_create(thread, &thread->bt_tcb, f, data,
	    stack_base, stack_size);

	thread->bt_cookie = cookie;
	thread->bt_wakeup_time = BMK_SCHED_BLOCK_INFTIME;

	inittcb(&thread->bt_tcb, tlsarea, TCBOFFSET);
	initcurrent(tlsarea, thread);

	TAILQ_INSERT_TAIL(&threadq, thread, bt_threadq);

	/* set runnable manually, we don't satisfy invariants yet */
	flags = bmk_platform_splhigh();
	TAILQ_INSERT_TAIL(&runq, thread, bt_schedq);
	thread->bt_flags |= THR_RUNQ;
	bmk_platform_splx(flags);

	return thread;
}

struct bmk_thread *
bmk_sched_create(const char *name, void *cookie, int joinable,
	void (*f)(void *), void *data,
	void *stack_base, unsigned long stack_size)
{
	void *tlsarea;

	tlsarea = bmk_sched_tls_alloc();
	return bmk_sched_create_withtls(name, cookie, joinable, f, data,
	    stack_base, stack_size, tlsarea);
}
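
/*
 * Example usage (a sketch; "worker" and workerfn() are hypothetical
 * names, not part of this file):
 *
 *	struct bmk_thread *worker;
 *
 *	worker = bmk_sched_create("worker", NULL, 1, workerfn, NULL, NULL, 0);
 *	...
 *	bmk_sched_join(worker);
 *
 * Passing NULL/0 for the stack makes the scheduler allocate one.  A
 * thread created with joinable != 0 must eventually be joined with
 * bmk_sched_join().
 */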

struct join_waiter {
	struct bmk_thread *jw_thread;
	struct bmk_thread *jw_wanted;
	TAILQ_ENTRY(join_waiter) jw_entries;
};
static TAILQ_HEAD(, join_waiter) joinwq = TAILQ_HEAD_INITIALIZER(joinwq);

void
bmk_sched_exit_withtls(void)
{
	struct bmk_thread *thread = bmk_current;
	struct join_waiter *jw_iter;
	unsigned long flags;

	/* if joinable, gate until we are allowed to exit */
	flags = bmk_platform_splhigh();
	while (thread->bt_flags & THR_MUSTJOIN) {
		thread->bt_flags |= THR_JOINED;
		bmk_platform_splx(flags);

		/* see if the joiner is already there */
		TAILQ_FOREACH(jw_iter, &joinwq, jw_entries) {
			if (jw_iter->jw_wanted == thread) {
				bmk_sched_wake(jw_iter->jw_thread);
				break;
			}
		}
		bmk_sched_blockprepare();
		bmk_sched_block();
		flags = bmk_platform_splhigh();
	}

	/* Remove from the thread list */
	bmk_assert((thread->bt_flags & THR_QMASK) == 0);
	TAILQ_REMOVE(&threadq, thread, bt_threadq);
	setflags(thread, THR_DEAD, THR_RUNNING);

	/* Put onto exited list */
	TAILQ_INSERT_HEAD(&zombieq, thread, bt_threadq);
	bmk_platform_splx(flags);

	/* bye */
	schedule();
	bmk_platform_halt("schedule() returned for a dead thread!\n");
}

void
bmk_sched_exit(void)
{

	bmk_sched_tls_free((void *)bmk_current->bt_tcb.btcb_tp);
	bmk_sched_exit_withtls();
}

void
bmk_sched_join(struct bmk_thread *joinable)
{
	struct join_waiter jw;
	struct bmk_thread *thread = bmk_current;
	unsigned long flags;

	bmk_assert(joinable->bt_flags & THR_MUSTJOIN);

	flags = bmk_platform_splhigh();
	/* wait for exiting thread to hit thread_exit() */
	while ((joinable->bt_flags & THR_JOINED) == 0) {
		bmk_platform_splx(flags);

		jw.jw_thread = thread;
		jw.jw_wanted = joinable;
		TAILQ_INSERT_TAIL(&joinwq, &jw, jw_entries);
		bmk_sched_blockprepare();
		bmk_sched_block();
		TAILQ_REMOVE(&joinwq, &jw, jw_entries);

		flags = bmk_platform_splhigh();
	}

	/* signal exiting thread that we have seen it and it may now exit */
	bmk_assert(joinable->bt_flags & THR_JOINED);
	joinable->bt_flags &= ~THR_MUSTJOIN;
	bmk_platform_splx(flags);

	bmk_sched_wake(joinable);
}

/*
 * These suspend calls are different from block calls in that they
 * can be used to block other threads.  The only reason we need these
 * is that someone was clever enough to invent _np interfaces for
 * libpthread which allow randomly suspending other threads.
 */
void
bmk_sched_suspend(struct bmk_thread *thread)
{

	bmk_platform_halt("sched_suspend unimplemented");
}

void
bmk_sched_unsuspend(struct bmk_thread *thread)
{

	bmk_platform_halt("sched_unsuspend unimplemented");
}

void
bmk_sched_blockprepare_timeout(bmk_time_t deadline)
{
	struct bmk_thread *thread = bmk_current;
	int flags;

	bmk_assert((thread->bt_flags & THR_BLOCKPREP) == 0);

	flags = bmk_platform_splhigh();
	thread->bt_wakeup_time = deadline;
	thread->bt_flags |= THR_BLOCKPREP;
	clear_runnable();
	bmk_platform_splx(flags);
}

void
bmk_sched_blockprepare(void)
{

	bmk_sched_blockprepare_timeout(BMK_SCHED_BLOCK_INFTIME);
}

int
bmk_sched_block(void)
{
	struct bmk_thread *thread = bmk_current;
	int tflags;

	bmk_assert((thread->bt_flags & THR_TIMEDOUT) == 0);
	bmk_assert(thread->bt_flags & THR_BLOCKPREP);

	schedule();

	tflags = thread->bt_flags;
	thread->bt_flags &= ~(THR_TIMEDOUT | THR_BLOCKPREP);

	return tflags & THR_TIMEDOUT ? BMK_ETIMEDOUT : 0;
}
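
/*
 * Example (a sketch): callers pair blockprepare with block, e.g.
 *
 *	bmk_sched_blockprepare_timeout(
 *	    bmk_platform_cpu_clock_monotonic() + timo_ns);
 *	if (bmk_sched_block() == BMK_ETIMEDOUT)
 *		handle the timeout
 *
 * where timo_ns is a hypothetical relative timeout in nanoseconds; the
 * deadline itself is absolute against the monotonic clock.  Use
 * bmk_sched_blockprepare() for an indefinite sleep that ends only with
 * an explicit bmk_sched_wake().
 */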

void
bmk_sched_wake(struct bmk_thread *thread)
{

	thread->bt_wakeup_time = BMK_SCHED_BLOCK_INFTIME;
	set_runnable(thread);
}

void __attribute__((noreturn))
bmk_sched_startmain(void (*mainfun)(void *), void *arg)
{
	struct bmk_thread *mainthread;
	struct bmk_thread initthread;

	bmk_memset(&initthread, 0, sizeof(initthread));
	bmk_strcpy(initthread.bt_name, "init");
	stackalloc(&bmk_mainstackbase, &bmk_mainstacksize);

	mainthread = bmk_sched_create("main", NULL, 0,
	    mainfun, arg, bmk_mainstackbase, bmk_mainstacksize);
	if (mainthread == NULL)
		bmk_platform_halt("failed to create main thread");

	/*
	 * Manually switch to mainthread without going through
	 * bmk_sched (avoids confusion with bmk_current).
	 */
	TAILQ_REMOVE(&runq, mainthread, bt_schedq);
	setflags(mainthread, THR_RUNNING, THR_RUNQ);
	sched_switch(&initthread, mainthread);

	bmk_platform_halt("bmk_sched_init unreachable");
}

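/*
 * The hook, if set, is called from sched_switch() with the cookies of
 * the outgoing and incoming threads just before the context switch.
 */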
void
bmk_sched_set_hook(void (*f)(void *, void *))
{

	scheduler_hook = f;
}

struct bmk_thread *
bmk_sched_init_mainlwp(void *cookie)
{

	bmk_current->bt_cookie = cookie;
	return bmk_current;
}

const char *
bmk_sched_threadname(struct bmk_thread *thread)
{

	return thread->bt_name;
}

/*
 * XXX: this does not really belong here, but libbmk_rumpuser needs
 * to be able to set an errno, so we can't push it into libc without
 * violating abstraction layers.
 */
int *
bmk_sched_geterrno(void)
{

	return &bmk_current->bt_errno;
}

void
bmk_sched_yield(void)
{
	struct bmk_thread *thread = bmk_current;
	int flags;

	bmk_assert(thread->bt_flags & THR_RUNNING);

	/* make schedulable and re-insert into runqueue */
	flags = bmk_platform_splhigh();
	setflags(thread, THR_RUNQ, THR_RUNNING);
	TAILQ_INSERT_TAIL(&runq, thread, bt_schedq);
	bmk_platform_splx(flags);

	schedule();
}