subr_taskqueue.c revision 180583
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 180583 2008-07-18 06:12:31Z kmacy $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

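/*
 * A taskqueue is a list of deferred function calls ("tasks") that is
 * drained either from a software interrupt handler or from dedicated
 * kernel threads.  A minimal consumer sketch for a thread-backed
 * queue follows; the names foo_tq, foo_task and foo_intr_task are
 * hypothetical, for illustration only:
 *
 *	TASK_INIT(&sc->foo_task, 0, foo_intr_task, sc);
 *	sc->foo_tq = taskqueue_create("foo_taskq", M_NOWAIT,
 *	    taskqueue_thread_enqueue, &sc->foo_tq);
 *	taskqueue_start_threads(&sc->foo_tq, 1, PI_NET, "foo taskq");
 *	...
 *	taskqueue_enqueue(sc->foo_tq, &sc->foo_task);
 *	...
 *	taskqueue_drain(sc->foo_tq, &sc->foo_task);
 *	taskqueue_free(sc->foo_tq);
 */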
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
/* Global list of all taskqueues, protected by taskqueue_queues_mutex. */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

STAILQ_HEAD(task_head, task);

struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)	/* queue accepts new tasks */
#define	TQ_FLAGS_BLOCKED	(1 << 1)	/* enqueue hook is deferred */
#define	TQ_FLAGS_PENDING	(1 << 2)	/* enqueue arrived while blocked */
#define	TQ_FLAGS_SPIN		(1 << 3)	/* tq_mutex is a spin mutex */
#define	TQ_FLAGS_NOWAKEUP	(1 << 4)	/* drv queue: no wakeup on enqueue */
#define	TQ_FLAGS_RUNNING	(1 << 5)	/* taskqueue_run_drv() is draining */

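/*
 * "Fast" taskqueues are created with MTX_SPIN and must be locked with
 * the spin-mutex primitives; the wrappers below pick the correct
 * flavour at run time based on TQ_FLAGS_SPIN.
 */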
#define	TQ_LOCK(tq)					\
do {							\
	if ((tq)->tq_flags & TQ_FLAGS_SPIN)		\
		mtx_lock_spin(&(tq)->tq_mutex);		\
	else						\
		mtx_lock(&(tq)->tq_mutex);		\
} while (0)

#define	TQ_UNLOCK(tq)					\
do {							\
	if ((tq)->tq_flags & TQ_FLAGS_SPIN)		\
		mtx_unlock_spin(&(tq)->tq_mutex);	\
	else						\
		mtx_unlock(&(tq)->tq_mutex);		\
} while (0)

static void	init_taskqueue_list(void *data);

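/*
 * Sleep on the queue, using msleep_spin() when the queue's mutex is
 * a spin mutex and msleep() otherwise.
 */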
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_flags & TQ_FLAGS_SPIN)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

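/*
 * Common creation path: allocate and initialize a queue, then link
 * it onto the global list so that taskqueue_find() can see it.
 */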
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;
	int spin;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);
	spin = ((mtxflags & MTX_SPIN) ? TQ_FLAGS_SPIN : 0);
	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_flags |= TQ_FLAGS_ACTIVE | spin;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue"));
}

/*
 * Signal a taskqueue's threads to terminate, then wait for them all
 * to exit.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

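/*
 * Free a queue created by taskqueue_create(): run any remaining
 * tasks, wait for the service threads to exit, and release all
 * resources.
 */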
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

/*
 * Look up a taskqueue by name.  Returns with the taskqueue locked,
 * or NULL if no queue with that name exists.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			TQ_LOCK(queue);
			mtx_unlock(&taskqueue_queues_mutex);
			return (queue);
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return (NULL);
}

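/*
 * Enqueue a task.  If the task is already pending, only its pending
 * count is bumped.  Tasks are kept sorted by descending ta_priority;
 * the enqueue hook is skipped while the queue is blocked or while a
 * drv queue is mid-run.
 */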
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending || (task->ta_flags & TA_REFERENCED)) {
		task->ta_pending++;
		/*
		 * Saturate the pending count instead of wrapping to zero.
		 */
		if (task->ta_pending == 0)
			task->ta_pending--;

		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (prev == NULL || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins != NULL;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & (TQ_FLAGS_BLOCKED | TQ_FLAGS_RUNNING)) == 0)
		queue->tq_enqueue(queue->tq_context);
	else if (queue->tq_flags & TQ_FLAGS_BLOCKED)
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return (0);
}

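/*
 * taskqueue_block() defers calls to the queue's enqueue hook: tasks
 * are still linked onto the queue, but the hook is not invoked again
 * until taskqueue_unblock() is called.
 */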
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

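/*
 * Run all pending tasks.  The queue's lock may already be held by
 * the caller; if not, it is taken here and dropped again before
 * returning.
 */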
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

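/*
 * Wait until the given task is neither pending nor running.  The
 * caller must not hold the queue's lock, and must not call this from
 * the task function itself on the same queue, or it will deadlock.
 */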
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_flags & TQ_FLAGS_SPIN) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task == queue->tq_running)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

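/*
 * Create "count" kernel threads at priority "pri" to service a
 * thread-backed queue.  The printf-style "name" argument forms the
 * thread name; an index is appended when more than one thread is
 * created.
 */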
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

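/*
 * Service loop for taskqueue threads: run tasks as they appear and
 * sleep otherwise, until the queue is deactivated by taskqueue_free().
 */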
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	do {
		taskqueue_run(tq);
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	} while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

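/*
 * Enqueue hook for thread-backed queues: wake one service thread.
 * Called from taskqueue_enqueue() with the queue lock held.
 */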
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

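/*
 * "Fast" taskqueues use a spin mutex for tq_mutex, so they may be
 * driven safely from fast interrupt handlers.
 */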
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue"));
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, 0,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

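/*
 * Run handler for driver ("drv") taskqueues.  All queued tasks are
 * moved onto a private list in one batch to limit lock traffic; a
 * task whose ta_drv_func returns TA_NO_DEQUEUE, or which is enqueued
 * again while running, is kept on the list for another pass.
 */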
static void
taskqueue_run_drv(void *arg)
{
	struct task *task, *tmp;
	struct task_head current;
	int restarts = 0;
	struct taskqueue *queue = (struct taskqueue *)arg;

	STAILQ_INIT(&current);
	/*
	 * First move all of the tasks off of the taskqueue's list onto
	 * "current" on the stack, to avoid repeated serialization.
	 */
	mtx_lock_spin(&queue->tq_mutex);
	queue->tq_flags |= TQ_FLAGS_RUNNING;
restart:
	STAILQ_CONCAT(&current, &queue->tq_queue);
	STAILQ_FOREACH(task, &current, ta_link) {
		/*
		 * Mark the task as dequeued but still referenced, so
		 * that taskqueue_enqueue knows not to re-link it.
		 * Clear ta_pending so that if it is set again later we
		 * know the task needs to be re-run even if it doesn't
		 * return TA_NO_DEQUEUE.
		 */
		task->ta_ppending = task->ta_pending;
		task->ta_pending = 0;
		task->ta_flags |= TA_REFERENCED;
	}
	mtx_unlock_spin(&queue->tq_mutex);
	STAILQ_FOREACH(task, &current, ta_link) {
		task->ta_rc = task->ta_drv_func(task->ta_context,
		    task->ta_ppending);
	}
	/*
	 * Getting here means that every task that was on the taskqueue's
	 * list at the start of this pass has been run.
	 */
	mtx_lock_spin(&queue->tq_mutex);
	STAILQ_FOREACH_SAFE(task, &current, ta_link, tmp) {
		if (task->ta_rc != TA_NO_DEQUEUE && task->ta_pending == 0) {
			STAILQ_REMOVE(&current, task, task, ta_link);
			task->ta_flags &= ~TA_REFERENCED;
		}
		task->ta_ppending = 0;
		task->ta_rc = 0;
	}
	/*
	 * Restart if any tasks remain on either list.
	 */
	if (STAILQ_FIRST(&current) || STAILQ_FIRST(&queue->tq_queue)) {
		restarts++;
		goto restart;
	}
	queue->tq_flags &= ~TQ_FLAGS_RUNNING;
	mtx_unlock_spin(&queue->tq_mutex);
	CTR2(KTR_INTR,
	    "queue=%s returning from taskqueue_run_drv after %d restarts",
	    queue->tq_name, restarts);
}

static void
taskqueue_drv_schedule(void *context)
{
	swi_sched(context, 0);
}

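/*
 * Create a driver ("drv") taskqueue backed by a software interrupt
 * handler: the enqueue hook schedules the SWI, whose handler drains
 * the queue via taskqueue_run_drv().
 */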
struct taskqueue *
taskqueue_define_drv(void *arg, const char *name)
{
	struct taskqueue *tq;

	tq = malloc(sizeof(struct taskqueue), M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq == NULL) {
		printf("%s: unable to allocate fast drv task queue!\n",
		    __func__);
		return (NULL);
	}

	STAILQ_INIT(&tq->tq_queue);
	tq->tq_name = name;
	tq->tq_enqueue = taskqueue_drv_schedule;
	tq->tq_flags = (TQ_FLAGS_ACTIVE | TQ_FLAGS_SPIN | TQ_FLAGS_NOWAKEUP);
	mtx_init(&tq->tq_mutex, name, NULL, MTX_SPIN);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, tq, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	swi_add(NULL, name, taskqueue_run_drv,
	    tq, SWI_NET, INTR_MPSAFE, &tq->tq_context);
	return (tq);
}

struct intr_handler *
taskqueue_drv_handler(struct taskqueue *tq)
{
	return ((struct intr_handler *)tq->tq_context);
}

struct thread *
taskqueue_drv_thread(void *context)
{
	struct taskqueue *tq = (struct taskqueue *)context;

	return (intr_handler_thread((struct intr_handler *)tq->tq_context));
}


/*
 * The caller must ensure that no new tasks are being enqueued before
 * calling this.
 */
void
taskqueue_free_drv(struct taskqueue *queue)
{
	struct intr_event *ie;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	ie = ((struct intr_handler *)(queue->tq_context))->ih_event;
	swi_remove(queue->tq_context);
	intr_event_destroy(ie);

	mtx_lock_spin(&queue->tq_mutex);
	taskqueue_run(queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}