subr_taskqueue.c revision 196295
161033Sdfr/*-
261033Sdfr * Copyright (c) 2000 Doug Rabson
361033Sdfr * All rights reserved.
461033Sdfr *
561033Sdfr * Redistribution and use in source and binary forms, with or without
661033Sdfr * modification, are permitted provided that the following conditions
761033Sdfr * are met:
861033Sdfr * 1. Redistributions of source code must retain the above copyright
961033Sdfr *    notice, this list of conditions and the following disclaimer.
1061033Sdfr * 2. Redistributions in binary form must reproduce the above copyright
1161033Sdfr *    notice, this list of conditions and the following disclaimer in the
1261033Sdfr *    documentation and/or other materials provided with the distribution.
1361033Sdfr *
1461033Sdfr * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1561033Sdfr * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1661033Sdfr * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1761033Sdfr * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1861033Sdfr * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1961033Sdfr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2061033Sdfr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2161033Sdfr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2261033Sdfr * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2361033Sdfr * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2461033Sdfr * SUCH DAMAGE.
2561033Sdfr */
2661033Sdfr
27116182Sobrien#include <sys/cdefs.h>
28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 196295 2009-08-17 09:01:20Z pjd $");
29116182Sobrien
3061033Sdfr#include <sys/param.h>
3185521Sjhb#include <sys/systm.h>
3265822Sjhb#include <sys/bus.h>
3385560Sjhb#include <sys/interrupt.h>
3461033Sdfr#include <sys/kernel.h>
35123614Sjhb#include <sys/kthread.h>
3685521Sjhb#include <sys/lock.h>
3761033Sdfr#include <sys/malloc.h>
3885521Sjhb#include <sys/mutex.h>
39145729Ssam#include <sys/proc.h>
40154333Sscottl#include <sys/sched.h>
4185521Sjhb#include <sys/taskqueue.h>
42119708Sken#include <sys/unistd.h>
43154333Sscottl#include <machine/stdarg.h>
4461033Sdfr
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
/* swi handles for the Giant-locked and MP-safe software-interrupt queues. */
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
/* Global registry of all taskqueues, protected by taskqueue_queues_mutex. */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;
5067551Sjhb
struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on taskqueue_queues */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks, priority order */
	const char		*tq_name;	/* queue name (pointer not copied) */
	taskqueue_enqueue_fn	tq_enqueue;	/* "kick the consumer" callback */
	void			*tq_context;	/* argument for tq_enqueue */
	struct task		*tq_running;	/* task currently being executed */
	struct mtx		tq_mutex;	/* protects the fields above */
	struct thread		**tq_threads;	/* worker threads; may hold NULLs
						   where kthread_add() failed */
	int			tq_tcount;	/* number of live worker threads */
	int			tq_spin;	/* nonzero if tq_mutex is MTX_SPIN */
	int			tq_flags;	/* TQ_FLAGS_* below */
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)	/* clear => workers should exit */
#define	TQ_FLAGS_BLOCKED	(1 << 1)	/* suppress tq_enqueue callback */
#define	TQ_FLAGS_PENDING	(1 << 2)	/* enqueue arrived while blocked */
68154333Sscottl
69180588Skmacystatic __inline void
70180588SkmacyTQ_LOCK(struct taskqueue *tq)
71180588Skmacy{
72180588Skmacy	if (tq->tq_spin)
73180588Skmacy		mtx_lock_spin(&tq->tq_mutex);
74180588Skmacy	else
75180588Skmacy		mtx_lock(&tq->tq_mutex);
76180588Skmacy}
77154167Sscottl
78180588Skmacystatic __inline void
79180588SkmacyTQ_UNLOCK(struct taskqueue *tq)
80180588Skmacy{
81180588Skmacy	if (tq->tq_spin)
82180588Skmacy		mtx_unlock_spin(&tq->tq_mutex);
83180588Skmacy	else
84180588Skmacy		mtx_unlock(&tq->tq_mutex);
85180588Skmacy}
86154167Sscottl
/* Forward declaration for the SYSINIT hook below. */
static void	init_taskqueue_list(void *data);
8885521Sjhb
89154167Sscottlstatic __inline int
90154167SscottlTQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
91154167Sscottl    int t)
92154167Sscottl{
93180588Skmacy	if (tq->tq_spin)
94154167Sscottl		return (msleep_spin(p, m, wm, t));
95154167Sscottl	return (msleep(p, m, pri, wm, t));
96154167Sscottl}
97154167Sscottl
/*
 * One-time setup of the global taskqueue registry; runs via the
 * SYSINIT below before any taskqueue can be created.
 */
static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);
10785521Sjhb
108154167Sscottlstatic struct taskqueue *
109154167Sscottl_taskqueue_create(const char *name, int mflags,
110145729Ssam		 taskqueue_enqueue_fn enqueue, void *context,
111154167Sscottl		 int mtxflags, const char *mtxname)
11261033Sdfr{
11361033Sdfr	struct taskqueue *queue;
114180588Skmacy
11585521Sjhb	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
11661033Sdfr	if (!queue)
117188058Simp		return NULL;
118180588Skmacy
11961033Sdfr	STAILQ_INIT(&queue->tq_queue);
12061033Sdfr	queue->tq_name = name;
12161033Sdfr	queue->tq_enqueue = enqueue;
12261033Sdfr	queue->tq_context = context;
123180588Skmacy	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
124180588Skmacy	queue->tq_flags |= TQ_FLAGS_ACTIVE;
125154167Sscottl	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);
12661033Sdfr
12785521Sjhb	mtx_lock(&taskqueue_queues_mutex);
12861033Sdfr	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
12985521Sjhb	mtx_unlock(&taskqueue_queues_mutex);
13061033Sdfr
13161033Sdfr	return queue;
13261033Sdfr}
13361033Sdfr
/*
 * Create a taskqueue protected by a regular (MTX_DEF, sleepable) mutex.
 */
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
141154167Sscottl
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	/*
	 * Called with tq_mutex held (TQ_SLEEP drops and reacquires it).
	 * Keep waking the workers - they exit once TQ_FLAGS_ACTIVE is
	 * clear - and sleep on "pp" until the last one has decremented
	 * tq_tcount on its way out (see taskqueue_thread_loop()).
	 */
	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}
154145729Ssam
/*
 * Tear down a taskqueue: unregister it, run any still-pending tasks,
 * wait for the worker threads to exit, and release all memory.
 */
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	TQ_LOCK(queue);
	/* Clearing ACTIVE tells taskqueue_thread_loop() instances to exit. */
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	/* Drain whatever is still queued before the workers go away. */
	taskqueue_run(queue);
	/* Sleeps until tq_tcount reaches zero; tq_mutex is still held after. */
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
17161033Sdfr
17285521Sjhb/*
17385521Sjhb * Returns with the taskqueue locked.
17485521Sjhb */
17561033Sdfrstruct taskqueue *
17661033Sdfrtaskqueue_find(const char *name)
17761033Sdfr{
17861033Sdfr	struct taskqueue *queue;
17961033Sdfr
18085521Sjhb	mtx_lock(&taskqueue_queues_mutex);
18185521Sjhb	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
182123614Sjhb		if (strcmp(queue->tq_name, name) == 0) {
183154167Sscottl			TQ_LOCK(queue);
18485521Sjhb			mtx_unlock(&taskqueue_queues_mutex);
18561033Sdfr			return queue;
18661033Sdfr		}
18785521Sjhb	}
18885521Sjhb	mtx_unlock(&taskqueue_queues_mutex);
189123614Sjhb	return NULL;
19061033Sdfr}
19161033Sdfr
/*
 * Queue "task" on "queue".  If the task is already pending only its
 * pending count is bumped; otherwise it is inserted in descending
 * ta_priority order and the queue's consumer is notified.  Always
 * returns 0.
 */
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		/* Walk forward to the first entry of lower priority. */
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	/*
	 * Kick the consumer (tq_enqueue runs with tq_mutex held) unless
	 * the queue is blocked, in which case record that a kick is owed
	 * so taskqueue_unblock() can deliver it.
	 */
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}
23861033Sdfr
/*
 * Suppress the queue's tq_enqueue callback: tasks may still be queued,
 * but the consumer is not notified until taskqueue_unblock().
 */
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}
247177621Sscottl
/*
 * Re-enable the tq_enqueue callback and, if any enqueue happened while
 * the queue was blocked (TQ_FLAGS_PENDING), deliver the deferred kick.
 */
void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
260177621Sscottl
/*
 * Execute every task currently on the queue.  May be entered with the
 * queue mutex either held or not held; the entry state is restored on
 * return.
 */
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	/* Take the lock only if the caller did not already hold it. */
	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		/*
		 * Publish the running task so taskqueue_drain() can wait
		 * on it, then drop the lock around the handler call.
		 */
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		/* Wake taskqueue_drain() sleepers waiting on this task. */
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}
29661033Sdfr
297136131Simpvoid
298136131Simptaskqueue_drain(struct taskqueue *queue, struct task *task)
299136131Simp{
300180588Skmacy	if (queue->tq_spin) {		/* XXX */
301154167Sscottl		mtx_lock_spin(&queue->tq_mutex);
302154167Sscottl		while (task->ta_pending != 0 || task == queue->tq_running)
303154167Sscottl			msleep_spin(task, &queue->tq_mutex, "-", 0);
304154167Sscottl		mtx_unlock_spin(&queue->tq_mutex);
305154167Sscottl	} else {
306154167Sscottl		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
307145729Ssam
308154167Sscottl		mtx_lock(&queue->tq_mutex);
309154167Sscottl		while (task->ta_pending != 0 || task == queue->tq_running)
310154167Sscottl			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
311154167Sscottl		mtx_unlock(&queue->tq_mutex);
312154167Sscottl	}
313136131Simp}
314136131Simp
/* tq_enqueue hook for taskqueue_swi: schedule its software interrupt. */
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}
32061033Sdfr
/* swi handler: drain all tasks queued on taskqueue_swi. */
static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}
32661033Sdfr
/* tq_enqueue hook for taskqueue_swi_giant: schedule its swi. */
static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}
332111528Sscottl
/* swi handler: drain all tasks queued on taskqueue_swi_giant. */
static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
338111528Sscottl
/*
 * Create "count" kernel threads at scheduling priority "pri" to
 * service the queue *tqp.  "name" is a printf-style format for the
 * thread name; with more than one thread a "_<index>" suffix is
 * appended.  Returns 0 on success or an errno value; a partial
 * failure of kthread_add() is tolerated (the surviving threads run).
 */
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, MAXCOMLEN, name, ap);
	va_end(ap);

	/* M_ZERO so unused/failed slots read as NULL below. */
	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	/* Phase 1: create the threads stopped (RFSTOPPED). */
	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	/* Phase 2: set each surviving thread's priority and release it. */
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
393154333Sscottl
/*
 * Main loop for a taskqueue worker thread: run tasks while the queue
 * is active, sleep when it is empty, and exit once TQ_FLAGS_ACTIVE is
 * cleared by taskqueue_free().  "arg" is the struct taskqueue **
 * passed to kthread_add() by taskqueue_start_threads().
 */
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
420119708Sken
/*
 * tq_enqueue hook for thread-backed queues: wake one worker sleeping
 * in taskqueue_thread_loop().  Called with tq_mutex held (asserted).
 */
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}
432119708Sken
/* System-wide swi-backed queue for MP-safe handlers (INTR_MPSAFE). */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

/* System-wide swi-backed queue whose handlers run under Giant. */
TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

/* System-wide kernel-thread-backed queue. */
TASKQUEUE_DEFINE_THREAD(thread);
442119789Ssam
/*
 * Create a taskqueue protected by a spin mutex (MTX_SPIN); TQ_LOCK()
 * and friends select the spin-mutex primitives via tq_spin.
 */
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}
450119789Ssam
/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	/* taskqueue_enqueue() already handles spin queues via TQ_LOCK(). */
	return taskqueue_enqueue(queue, task);
}
457119789Ssam
/* swi handle for the system fast taskqueue defined below. */
static void	*taskqueue_fast_ih;

/* tq_enqueue hook for taskqueue_fast: schedule its software interrupt. */
static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

/* swi handler: drain all tasks queued on taskqueue_fast. */
static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}
471119789Ssam
/* System-wide spin-mutex queue driven by the SWI_TQ_FAST interrupt. */
TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
475196295Spjd
476196295Spjdint
477196295Spjdtaskqueue_member(struct taskqueue *queue, struct thread *td)
478196295Spjd{
479196295Spjd	int i, j, ret = 0;
480196295Spjd
481196295Spjd	TQ_LOCK(queue);
482196295Spjd	for (i = 0, j = 0; ; i++) {
483196295Spjd		if (queue->tq_threads[i] == NULL)
484196295Spjd			continue;
485196295Spjd		if (queue->tq_threads[i] == td) {
486196295Spjd			ret = 1;
487196295Spjd			break;
488196295Spjd		}
489196295Spjd		if (++j >= queue->tq_tcount)
490196295Spjd			break;
491196295Spjd	}
492196295Spjd	TQ_UNLOCK(queue);
493196295Spjd	return (ret);
494196295Spjd}
495