subr_taskqueue.c revision 323447
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/subr_taskqueue.c 323447 2017-09-11 17:32:26Z ian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

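/*
 * Sentinel tb_running value: an entry on tq_active whose tb_running
 * points at TB_DRAIN_WAITER marks a thread sleeping in
 * taskqueue_drain_tq_active() rather than a task being executed.
 */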
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
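
/*
 * Consumers normally reach this through the TIMEOUT_TASK_INIT() macro in
 * <sys/taskqueue.h>.  An illustrative sketch only ("sc" and my_timeout_fn
 * are hypothetical; the queue must not be a spin queue):
 *
 *	TIMEOUT_TASK_INIT(tq, &sc->sc_tmo, 0, my_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(tq, &sc->sc_tmo, hz);
 */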

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, name);
}
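
/*
 * Typical private-queue setup (an illustrative sketch only; "sc",
 * my_task_fn, and the queue name are hypothetical).  Passing &sc->sc_tq
 * as the enqueue context is the convention required by
 * taskqueue_thread_enqueue(), defined below:
 *
 *	sc->sc_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "my_tq");
 *	TASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 */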

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, 0));
}
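
/*
 * Note on return values (derived from the logic above): both timeout
 * enqueue variants return the task's previous pending count, so 0 means
 * the task was newly scheduled, a positive value means it was already
 * pending or its callout was re-armed, and -1 means the enqueue was
 * refused because a drain is in progress.
 */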

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked() directly (which would drop the
	 * lock anyway), so just insert it at the tail while we have
	 * the queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running; otherwise
 * the task is idle and can be enqueued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}
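
/*
 * Example polling loop (an illustrative sketch only; "sc" is a
 * hypothetical softc): busy-wait for a task to go idle before freeing
 * the structure that embeds it.
 *
 *	while (taskqueue_poll_is_busy(tq, &sc->sc_task))
 *		DELAY(1000);
 *	free(sc, M_DEVBUF);
 */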

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}
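
/*
 * A typical teardown sequence (an illustrative sketch only; "sc" and its
 * members are hypothetical): drain the timeout task so neither the
 * callout nor the task can fire again, then release the queue.
 *
 *	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tmo);
 *	taskqueue_free(sc->sc_tq);
 */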

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
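
/*
 * Example of pinning queue threads to CPU 0 (an illustrative sketch
 * only; "tq" is assumed to have been created as in the earlier example):
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&tq, 1, PWAIT, &mask, "my_tq");
 */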

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * need to recheck whether TQ_FLAGS_ACTIVE was cleared in
		 * the meantime; if it was, we missed the wakeup and must
		 * not go back to sleep.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}
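
/*
 * Note: taskqueue_thread_enqueue() expects its context argument to be a
 * pointer to the taskqueue pointer (struct taskqueue **), which is why
 * consumers pass &tq to taskqueue_create().  The double indirection lets
 * the enqueue callback be registered before the queue variable itself is
 * assigned.
 */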

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
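
/*
 * The three definitions above instantiate the system-wide queues declared
 * in <sys/taskqueue.h>: taskqueue_swi and taskqueue_swi_giant, run from
 * software interrupt handlers (the latter without INTR_MPSAFE, i.e. under
 * Giant), and taskqueue_thread, serviced by its own kernel thread.
 */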

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

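/*
 * Return non-zero if td is one of this taskqueue's worker threads.  The
 * scan skips NULL slots (left behind when kthread_add() fails in
 * _taskqueue_start_threads()) and stops once tq_tcount live entries have
 * been examined or a match is found.
 */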
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
