/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/subr_gtaskqueue.c 354406 2019-11-06 18:15:20Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void	gtaskqueue_thread_enqueue(void *);
static void	gtaskqueue_thread_loop(void *arg);

TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);

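/*
 * One gtaskqueue_busy record is linked into tq_active for each taskqueue
 * thread while it executes a task; tb_running is the task in flight and
 * tb_seq snapshots tq_seq so that drains can tell old work from new.
 */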
struct gtaskqueue_busy {
	struct gtask		*tb_running;
	u_int			 tb_seq;
	LIST_ENTRY(gtaskqueue_busy) tb_link;
};

struct gtaskqueue {
	STAILQ_HEAD(, gtask)	tq_queue;
	LIST_HEAD(, gtaskqueue_busy) tq_active;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	gtaskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
	printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
	       gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context);
}
#endif

static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

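/*
 * Allocate and initialize a taskqueue.  The queue uses a spin mutex when
 * MTX_SPIN is passed in mtxflags and a regular sleep mutex otherwise.
 * Returns NULL if memory cannot be allocated with the given mflags.
 */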
static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname __unused)
{
	struct gtaskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
	if (!tq_name)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
	if (!queue) {
		free(tq_name, M_GTASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == gtaskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}


/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "gtq_destroy");
	}
}

static void
gtaskqueue_free(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	gtaskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_GTASKQUEUE);
	free(queue->tq_name, M_GTASKQUEUE);
	free(queue, M_GTASKQUEUE);
}

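/*
 * Put a task on the queue unless it is already pending.  The queue's
 * enqueue hook (normally a thread wakeup) is invoked unless enqueueing
 * has been blocked with gtaskqueue_block().  Always returns 0.
 */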
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	if (gtask->ta_flags & TASK_ENQUEUED) {
		TQ_UNLOCK(queue);
		return (0);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
	gtask->ta_flags |= TASK_ENQUEUED;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}

static void
gtaskqueue_task_nop_fn(void *context)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
	struct gtask t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_flags |= TASK_ENQUEUED;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_flags & TASK_ENQUEUED)
		TQ_SLEEP(queue, &t_barrier, "gtq_qdrain");
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return;

	/* Block taskq_terminate().*/
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "gtq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}

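/*
 * Blocking a queue suppresses the enqueue hook, so newly queued tasks no
 * longer wake the taskqueue threads; the tasks themselves still go onto
 * the queue.  Unblocking re-enables the hook and kicks the queue if any
 * work is pending.
 */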
void
gtaskqueue_block(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

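/*
 * Run tasks until the queue is empty.  The calling thread publishes a
 * busy record on tq_active so that cancel/drain can see what is running.
 * The queue lock is dropped around each ta_func invocation and waiters
 * sleeping on the task are woken once it returns.
 */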
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy tb;
	struct gtask *gtask;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);

	while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		gtask->ta_flags &= ~TASK_ENQUEUED;
		tb.tb_running = gtask;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
		gtask->ta_func(gtask->ta_context);

		TQ_LOCK(queue);
		wakeup(gtask);
	}
	LIST_REMOVE(&tb, tb_link);
}

static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
	struct gtaskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == gtask)
			return (1);
	}
	return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (gtask->ta_flags & TASK_ENQUEUED)
		STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
	gtask->ta_flags &= ~TASK_ENQUEUED;
	return (task_is_running(queue, gtask) ? EBUSY : 0);
}

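/*
 * Remove a pending task from the queue.  Returns 0 on success or EBUSY
 * if the task is currently being executed by a taskqueue thread.
 */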
int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}

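/*
 * Sleep until the given task is neither queued nor running on any of the
 * queue's threads.
 */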
void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
		TQ_SLEEP(queue, gtask, "gtq_drain");
	TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

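/*
 * Create 'count' kernel threads for the taskqueue at priority 'pri',
 * optionally pinned to the CPUs in 'mask'.  A failure to pin is logged
 * but not fatal; a failure to create a thread leaves that slot NULL and
 * is cleaned up later by gtaskqueue_free().
 */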
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct gtaskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

static int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

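/*
 * Main loop of a taskqueue thread: run the INIT callback, then service
 * the queue, sleeping whenever it is empty, until the queue is marked
 * inactive.  On exit the queue is drained one last time, the SHUTDOWN
 * callback runs, and the thread rendezvouses with gtaskqueue_terminate().
 */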
static void
gtaskqueue_thread_loop(void *arg)
{
	struct gtaskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		gtaskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	gtaskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}


static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _gtaskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}


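/*
 * A taskqgroup is a set of per-CPU task queues.  Each taskqgroup_cpu
 * bucket holds one "fast" (spin mutex) gtaskqueue, the list of grouptasks
 * attached to it, and the CPU it services.
 */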
struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct gtaskqueue	*tgc_taskq;
	int	tgc_cnt;
	int	tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct mtx	tqg_lock;
	char *		tqg_name;
	int		tqg_adjusting;
	int		tqg_stride;
	int		tqg_cnt;
};

struct taskq_bind_task {
	struct gtask bt_task;
	int	bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
	    gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = cpu;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

	gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}

/*
 * Find the taskq with the least number of tasks that does not already
 * service any other task with the same uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	if (qgroup->tqg_cnt == 0)
		return (0);
	idx = -1;
	mincnt = INT_MAX;
	/*
	 * Two passes: first scan for a queue with the least tasks that
	 * does not already service this uniq id.  If that fails, simply
	 * find the queue with the least total tasks.
	 */
	for (strict = 1; mincnt == INT_MAX; strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n,
				    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("taskqgroup_find: Failed to pick a qid.");

	return (idx);
}

/*
 * smp_started is unusable since it is not set for UP kernels or even for
 * SMP kernels when there is 1 CPU.  This is usually handled by adding a
 * (mp_ncpus == 1) test, but that would be broken here since we need to
 * synchronize with the SI_SUB_SMP ordering.  Even in the pure SMP case
 * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
 *
 * So maintain our own flag.  It must be set after all CPUs are started
 * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
 * adjustment is properly delayed.  SI_ORDER_FOURTH is clearly before
 * SI_ORDER_ANY and unclearly after the CPUs are started.  It would be
 * simpler for adjustment to pass a flag indicating if it is delayed.
 */

static int tqg_smp_started;

static void
tqg_record_smp_started(void *arg)
{
	tqg_smp_started = 1;
}

SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
	tqg_record_smp_started, NULL);

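/*
 * Attach a grouptask to the least-loaded queue in the group that does not
 * already service 'uniq'.  If an IRQ is given and the CPUs are up, the
 * interrupt is also bound to that queue's CPU; before SMP startup the
 * binding is deferred until taskqgroup_adjust() runs.
 *
 * A minimal usage sketch, assuming the GROUPTASK_INIT()/GROUPTASK_ENQUEUE()
 * macros from <sys/gtaskqueue.h> and a hypothetical driver softc "sc":
 *
 *	GROUPTASK_INIT(&sc->sc_gtask, 0, mydrv_task_fn, sc);
 *	taskqgroup_attach(qgroup_softirq, &sc->sc_gtask, sc, -1, "mydrv");
 *	...
 *	GROUPTASK_ENQUEUE(&sc->sc_gtask);
 *
 * where GROUPTASK_ENQUEUE() hands the task to the chosen per-CPU queue.
 */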
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
	cpuset_t mask;
	int qid, error;

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && tqg_smp_started) {
		gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed for %s: %d\n", __func__, gtask->gt_name, error);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}

static void
taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	cpuset_t mask;
	int qid, cpu, error;

	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, gtask->gt_uniq);
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	if (gtask->gt_irq != -1) {
		mtx_unlock(&qgroup->tqg_lock);

		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
		mtx_lock(&qgroup->tqg_lock);
		if (error)
			printf("%s: %s setaffinity failed: %d\n", __func__, gtask->gt_name, error);

	}
	qgroup->tqg_queue[qid].tgc_cnt++;

	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
			 gt_list);
	MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	mtx_unlock(&qgroup->tqg_lock);
}

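/*
 * Like taskqgroup_attach(), but bind the grouptask to the queue servicing
 * a specific CPU.  Returns EINVAL if no queue for that CPU exists once SMP
 * has started; before that the task is parked on queue 0 and is re-homed
 * later by taskqgroup_adjust().
 */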
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
	void *uniq, int cpu, int irq, char *name)
{
	cpuset_t mask;
	int i, qid, error;

	qid = -1;
	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s", name ? name : "grouptask");
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	if (tqg_smp_started) {
		for (i = 0; i < qgroup->tqg_cnt; i++)
			if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
				qid = i;
				break;
			}
		if (qid == -1) {
			mtx_unlock(&qgroup->tqg_lock);
			printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
			return (EINVAL);
		}
	} else
		qid = 0;
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (irq != -1 && tqg_smp_started) {
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed: %d\n", __func__, error);
	}
	return (0);
}

static int
taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	cpuset_t mask;
	int i, qid, irq, cpu, error;

	qid = -1;
	irq = gtask->gt_irq;
	cpu = gtask->gt_cpu;
	MPASS(tqg_smp_started);
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
			qid = i;
			break;
		}
	if (qid == -1) {
		mtx_unlock(&qgroup->tqg_lock);
		printf("%s: qid not found for %s cpu=%d\n", __func__, gtask->gt_name, cpu);
		return (EINVAL);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	mtx_unlock(&qgroup->tqg_lock);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	if (irq != -1) {
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed: %d\n", __func__, error);
	}
	return (0);
}

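/*
 * Detach a grouptask from whichever queue it is attached to.  Panics if
 * the task is not found in the group.
 */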
void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("taskqgroup_detach: task %s not in group\n", gtask->gt_name);
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
}

static void
taskqgroup_binder(void *ctx)
{
	struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
	cpuset_t mask;
	int error;

	CPU_ZERO(&mask);
	CPU_SET(gtask->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, gtask->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("%s: setaffinity failed: %d\n", __func__,
		    error);
	free(gtask, M_DEVBUF);
}

static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *gtask;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	if (qgroup->tqg_cnt == 1)
		return;

	for (i = 0; i < qgroup->tqg_cnt; i++) {
		gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK);
		GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
		gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &gtask->bt_task);
	}
}

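/*
 * Resize the group to 'cnt' queues whose CPUs are 'stride' apart.  New
 * per-CPU queues are created as needed, every task attached so far is
 * re-distributed across the new queues (honoring any CPU binding), the
 * queue threads are bound to their CPUs, and any excess queues are freed.
 * Called with tqg_lock held; the lock is dropped and re-taken internally.
 */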
static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
	struct grouptask *gtask;
	int i, k, old_cnt, old_cpu, cpu;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);

	if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) {
		printf("%s: failed cnt: %d stride: %d "
		    "mp_ncpus: %d tqg_smp_started: %d\n",
		    __func__, cnt, stride, mp_ncpus, tqg_smp_started);
		return (EINVAL);
	}
	if (qgroup->tqg_adjusting) {
		printf("%s failed: adjusting\n", __func__);
		return (EBUSY);
	}
	qgroup->tqg_adjusting = 1;
	old_cnt = qgroup->tqg_cnt;
	old_cpu = 0;
	if (old_cnt < cnt)
		old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);
	/*
	 * Set up queue for tasks added before boot.
	 */
	if (old_cnt == 0) {
		LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
		    grouptask, gt_list);
		qgroup->tqg_queue[0].tgc_cnt = 0;
	}

	/*
	 * If new taskq threads have been added.
	 */
	cpu = old_cpu;
	for (i = old_cnt; i < cnt; i++) {
		taskqgroup_cpu_create(qgroup, i, cpu);

		for (k = 0; k < stride; k++)
			cpu = CPU_NEXT(cpu);
	}
	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_cnt = cnt;
	qgroup->tqg_stride = stride;

	/*
	 * Adjust drivers to use new taskqs.
	 */
	for (i = 0; i < old_cnt; i++) {
		while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
			LIST_REMOVE(gtask, gt_list);
			qgroup->tqg_queue[i].tgc_cnt--;
			LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
		}
	}
	mtx_unlock(&qgroup->tqg_lock);

	while ((gtask = LIST_FIRST(&gtask_head))) {
		LIST_REMOVE(gtask, gt_list);
		if (gtask->gt_cpu == -1)
			taskqgroup_attach_deferred(qgroup, gtask);
		else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
			taskqgroup_attach_deferred(qgroup, gtask);
	}

#ifdef INVARIANTS
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++) {
		MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL);
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list)
			MPASS(gtask->gt_taskqueue != NULL);
	}
	mtx_unlock(&qgroup->tqg_lock);
#endif
	/*
	 * If taskq thread count has been reduced.
	 */
	for (i = cnt; i < old_cnt; i++)
		taskqgroup_cpu_remove(qgroup, i);

	taskqgroup_bind(qgroup);

	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_adjusting = 0;

	return (0);
}

int
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	int error;

	mtx_lock(&qgroup->tqg_lock);
	error = _taskqgroup_adjust(qgroup, cnt, stride);
	mtx_unlock(&qgroup->tqg_lock);

	return (error);
}

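/*
 * Allocate a taskqgroup.  The per-CPU queues themselves are created later,
 * by a taskqgroup_adjust() call (TASKQGROUP_DEFINE() arranges one via a
 * SYSINIT after SMP startup).  taskqgroup_destroy() is currently a no-op.
 */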
struct taskqgroup *
taskqgroup_create(char *name)
{
	struct taskqgroup *qgroup;

	qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

	return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}