/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 177621 2008-03-25 22:38:45Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct proc		**tq_pproc;
	int			tq_pcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

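/*
 * A queue created with taskqueue_create_fast() uses a spin mutex so that
 * tasks can be enqueued from contexts that may not sleep; a regular queue
 * uses a sleep mutex.  The helpers below pick the matching lock and sleep
 * primitives based on tq_spin.
 */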
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static void	init_taskqueue_list(void *data);

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

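/*
 * Common constructor used by taskqueue_create() and taskqueue_create_fast():
 * allocate and initialise the queue, create its mutex with the requested
 * flags and link it onto the global list of task queues.
 */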
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

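/*
 * Typical use of this interface by a consumer (an illustrative sketch only;
 * "softc", "sc_tq", "sc_task" and "foo_task_fn" are hypothetical names, not
 * part of this file):
 *
 *	static void
 *	foo_task_fn(void *context, int pending)
 *	{
 *		struct softc *sc = context;
 *		... perform the deferred work ...
 *	}
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_task_fn, sc);
 *	sc->sc_tq = taskqueue_create("foo_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "foo taskq");
 *	...
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 *	...
 *	taskqueue_drain(sc->sc_tq, &sc->sc_task);
 *	taskqueue_free(sc->sc_tq);
 */
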
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct proc **pp, struct taskqueue *tq)
{

	while (tq->tq_pcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

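/*
 * Tear down a queue: remove it from the global list, run any tasks that are
 * still pending, wait for the service threads (if any) to exit and release
 * the queue's memory.
 */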
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_pproc, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_pproc, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

/*
 * Returns with the taskqueue locked.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			TQ_LOCK(queue);
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return NULL;
}

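/*
 * Queue a task for execution.  If the task is already pending, only its
 * pending count is bumped; otherwise it is inserted in priority order
 * (higher ta_priority first) and the queue's enqueue hook is invoked to
 * kick the consumer.  The kick is deferred while the queue is blocked.
 */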
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

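/*
 * taskqueue_block() suppresses calls to the enqueue hook; tasks can still be
 * queued while blocked.  taskqueue_unblock() re-enables the hook and delivers
 * any kick that was deferred in the meantime.
 */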
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

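/*
 * Execute every task currently on the queue.  The queue lock is dropped
 * around each ta_func call, and tq_running records the task in flight so
 * that taskqueue_drain() can wait for it to finish.
 */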
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

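/*
 * Wait until the given task is neither queued nor currently running.  The
 * caller must be able to sleep; for a spin-mutex ("fast") queue,
 * msleep_spin() is used instead of msleep().
 */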
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

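/*
 * Create "count" kernel processes to service the queue pointed to by *tqp,
 * naming them from the printf-like "name".  The threads are created stopped,
 * assigned the scheduling priority "pri" and then placed on the run queue.
 */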
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct taskqueue *tq;
	struct thread *td;
	char ktname[MAXCOMLEN];
	int i, error;

	if (count <= 0)
		return (EINVAL);
	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, MAXCOMLEN, name, ap);
	va_end(ap);

	tq->tq_pproc = malloc(sizeof(struct proc *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_pproc == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kproc_create(taskqueue_thread_loop, tqp,
			    &tq->tq_pproc[i], RFSTOPPED, 0, ktname);
		else
			error = kproc_create(taskqueue_thread_loop, tqp,
			    &tq->tq_pproc[i], RFSTOPPED, 0, "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kproc_create(%s): error %d",
				__func__, ktname, error);
			tq->tq_pproc[i] = NULL;		/* paranoid */
		} else
			tq->tq_pcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_pproc[i] == NULL)
			continue;
		td = FIRST_THREAD_IN_PROC(tq->tq_pproc[i]);
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

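/*
 * Main loop of the threads created by taskqueue_start_threads(): run any
 * queued tasks, sleep until woken by the enqueue hook, and exit once the
 * queue has been marked inactive, waking the thread tearing the queue down.
 */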
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	do {
		taskqueue_run(tq);
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	} while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_pcount--;
	wakeup_one(tq->tq_pproc);
	TQ_UNLOCK(tq);
	kproc_exit(0);
}

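/*
 * Enqueue hook for thread-backed queues: wake one service thread.  Called
 * with the queue mutex held.
 */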
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

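/*
 * System-wide queues: "swi" runs tasks from a software interrupt without
 * Giant, "swi_giant" runs them with Giant held, "thread" runs them from a
 * dedicated kernel thread, and "fast" (defined at the end of this file)
 * uses a spin mutex so it can be driven from fast interrupt handlers.
 */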
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, 0,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));