/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 275345 2014-11-30 19:32:00Z gibbs $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

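/*
 * Initialize a timeout task: set up the embedded task and bind its
 * callout to the queue's mutex, so the callout handler runs with the
 * queue locked and is responsible for releasing it.
 */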
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

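/*
 * Allocate and initialize a taskqueue.  Queues driven by one of the
 * built-in enqueue hooks may drop the queue lock before calling the
 * hook (TQ_FLAGS_UNLOCKED_ENQUEUE).
 */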
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

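/*
 * Typical usage, as a sketch (the softc and "foo" names here are
 * hypothetical):
 *
 *	sc->sc_tq = taskqueue_create("foo_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "foo taskq");
 */

/*
 * Register a callback to be invoked when the queue's threads start
 * (INIT) or exit (SHUTDOWN).  Each callback type may be set only once.
 */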
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal the taskqueue's threads to terminate, then wait until the
 * thread count and the number of armed timeout callouts both reach
 * zero.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

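/*
 * Tear down a taskqueue: mark it inactive, wait for its threads and
 * any armed timeout tasks to finish, then free its resources.
 */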
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

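/*
 * Insert a task into the queue in priority order and kick the queue's
 * consumer via tq_enqueue unless the queue is blocked.  Called with
 * the queue lock held; returns with it released.
 */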
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

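/*
 * Typical enqueue usage, as a sketch (foo_task_fn() and the softc are
 * hypothetical):
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_task_fn, sc);
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 *
 * foo_task_fn(sc, pending) later runs with "pending" set to the number
 * of enqueues since the task last started executing.
 */

/*
 * Callout handler for timeout tasks: disarm the callout and enqueue
 * the task.  Entered with the queue lock held (the callout was
 * initialized with callout_init_mtx()); returns with it released.
 */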
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

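/*
 * A delayed enqueue, as a sketch (the names are hypothetical); the
 * task is queued after roughly one second:
 *
 *	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_ttask, 0, foo_task_fn, sc);
 *	taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_ttask, hz);
 */

/*
 * Sleep until no tasks are executing; called with the queue lock held.
 */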
static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

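/*
 * Block the queue: tasks may still be enqueued, but the enqueue hook
 * is not called, so nothing new is dispatched until
 * taskqueue_unblock(), which re-kicks the queue if work is pending.
 */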
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

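/*
 * Run tasks until the queue is empty.  The queue lock is dropped
 * around each ta_func invocation; an on-stack taskqueue_busy record
 * on tq_active advertises which task is running, and waiters on the
 * task or on tq_active are woken as work completes.
 */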
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

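/*
 * Remove a pending task from the queue and report its pending count;
 * return EBUSY if the task is currently executing and so cannot be
 * cancelled outright.
 */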
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

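/*
 * Sleep until the given task is neither pending nor running on any of
 * the queue's threads.
 */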
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

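/*
 * Wait for the queue to empty: sleep until the task currently at the
 * tail of the queue has no pending count left, then wait out any
 * still-running tasks.
 */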
void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

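/*
 * Create "count" kernel threads to service the queue, optionally
 * pinning them to the CPUs in "mask", and set their scheduling
 * priority before making them runnable.
 */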
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *ktname)
{
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* Should be ok to continue; taskqueue_free() will clean up. */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	char ktname[MAXCOMLEN + 1];
	va_list ap;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	return (_taskqueue_start_threads(tqp, count, pri, NULL, ktname));
}

int
taskqueue_start_threads_pinned(struct taskqueue **tqp, int count, int pri,
    int cpu_id, const char *name, ...)
{
	char ktname[MAXCOMLEN + 1];
	va_list ap;
	cpuset_t mask;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	/*
	 * In case someone passes in NOCPU, just fall back to the
	 * default behaviour of "don't pin".
	 */
	if (cpu_id != NOCPU) {
		CPU_ZERO(&mask);
		CPU_SET(cpu_id, &mask);
	}

	return (_taskqueue_start_threads(tqp, count, pri,
	    cpu_id == NOCPU ? NULL : &mask, ktname));
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * need to check if the TQ_FLAGS_ACTIVE flag wasn't removed
		 * in the meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
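/*
 * The TASKQUEUE_DEFINE() uses above instantiate the system-wide
 * taskqueue_swi and taskqueue_swi_giant queues, driven by software
 * interrupts; TASKQUEUE_DEFINE_THREAD(thread) creates taskqueue_thread,
 * serviced by a dedicated kernel thread.
 */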

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

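/*
 * Return non-zero if td is one of this queue's worker threads.  NULL
 * entries (left behind by failed kthread_add() calls) are skipped;
 * the scan stops after tq_tcount live entries have been examined.
 */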
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}