/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 145473 2005-04-24 16:52:45Z sam $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
static struct mtx taskqueue_queues_mutex;

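/*
 * A task queue is a priority-ordered list of pending tasks protected by
 * tq_mutex, together with an enqueue hook that notifies whoever services
 * the queue (a software interrupt or a dedicated thread) that new work
 * has arrived.
 */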
struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
};

static void	init_taskqueue_list(void *data);

static void
init_taskqueue_list(void *data __unused)
{

	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
	STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);

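/*
 * Allocate and initialise a task queue and link it onto the global list of
 * queues.  Returns NULL if the allocation fails.
 */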
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return 0;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	return queue;
}

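/*
 * Unlink the queue from the global list, run any tasks still pending on
 * it, and release its mutex and memory.
 */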
void
taskqueue_free(struct taskqueue *queue)
{

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	mtx_lock(&queue->tq_mutex);
	taskqueue_run(queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}

/*
 * Look up a task queue by name.  Returns with the taskqueue locked, or
 * NULL if no queue with that name exists.
 */
struct taskqueue *
taskqueue_find(const char *name)
{
	struct taskqueue *queue;

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
		if (strcmp(queue->tq_name, name) == 0) {
			mtx_lock(&queue->tq_mutex);
			mtx_unlock(&taskqueue_queues_mutex);
			return queue;
		}
	}
	mtx_unlock(&taskqueue_queues_mutex);
	return NULL;
}

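/*
 * Queue a task for execution.  If the task is already pending, its pending
 * count is bumped instead of linking it a second time; otherwise it is
 * inserted in priority order and the queue's enqueue hook is called to
 * notify the consumer.
 *
 * A minimal usage sketch (the handler and softc below are hypothetical):
 *
 *	static void
 *	foo_task_fn(void *context, int pending)
 *	{
 *		struct foo_softc *sc = context;
 *		...
 *	}
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_task_fn, sc);
 *	taskqueue_enqueue(taskqueue_swi, &sc->sc_task);
 */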
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock(&queue->tq_mutex);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock(&queue->tq_mutex);

	return 0;
}

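/*
 * Execute all tasks currently on the queue.  Each handler is called with
 * the number of times its task was enqueued since it last ran; the queue
 * lock is dropped around the call and any threads draining the task are
 * woken once it has finished.
 */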
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_running = task;
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
		queue->tq_running = NULL;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		mtx_unlock(&queue->tq_mutex);
}

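/*
 * Sleep until the given task is neither pending on the queue nor currently
 * running.  Callers must be in a context that is allowed to sleep.
 */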
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "taskqueue_drain");
	mtx_lock(&queue->tq_mutex);
	while (task->ta_pending != 0 || task == queue->tq_running) {
		msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
	}
	mtx_unlock(&queue->tq_mutex);
}

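/*
 * Enqueue and run hooks for the software interrupt driven queues
 * (taskqueue_swi and taskqueue_swi_giant): the enqueue hook schedules the
 * software interrupt and the interrupt handler drains the queue.
 */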
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

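/*
 * Service loop for queues driven by a dedicated kernel thread: run any
 * pending tasks, then sleep on the queue until taskqueue_thread_enqueue()
 * wakes the thread up again.
 */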
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	mtx_lock(&tq->tq_mutex);
	for (;;) {
		taskqueue_run(tq);
		msleep(tq, &tq->tq_mutex, PWAIT, "-", 0);
	}
}

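/*
 * Enqueue hook for thread driven queues: wake up the service thread.  The
 * queue mutex is already held by the caller (taskqueue_enqueue()).
 */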
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup(tq);
}

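/*
 * Instantiate the standard global queues: taskqueue_swi (an MP-safe
 * software interrupt), taskqueue_swi_giant (a software interrupt run with
 * Giant held), and taskqueue_thread (serviced by a kernel thread).
 */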
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

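/*
 * The "fast" variants below mirror taskqueue_enqueue() and taskqueue_run()
 * but use spin mutex primitives, so a fast task queue can be driven from
 * contexts that must not sleep.
 */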
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	mtx_lock_spin(&queue->tq_mutex);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		mtx_unlock_spin(&queue->tq_mutex);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = 0;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if (queue->tq_enqueue)
		queue->tq_enqueue(queue->tq_context);

	mtx_unlock_spin(&queue->tq_mutex);

	return 0;
}

static void
taskqueue_run_fast(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock_spin(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		mtx_unlock_spin(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock_spin(&queue->tq_mutex);
	}
	mtx_unlock_spin(&queue->tq_mutex);
}

struct taskqueue *taskqueue_fast;
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_schedule(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run_fast(taskqueue_fast);
}

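/*
 * Create the global fast task queue at boot time, by hand rather than with
 * taskqueue_create() since it needs a spin mutex, and attach its software
 * interrupt handler.
 */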
static void
taskqueue_define_fast(void *arg)
{

	taskqueue_fast = malloc(sizeof(struct taskqueue), M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (!taskqueue_fast) {
		printf("%s: Unable to allocate fast task queue!\n", __func__);
		return;
	}

	STAILQ_INIT(&taskqueue_fast->tq_queue);
	taskqueue_fast->tq_name = "fast";
	taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
	mtx_init(&taskqueue_fast->tq_mutex, "taskqueue_fast", NULL, MTX_SPIN);

	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	swi_add(NULL, "Fast task queue", taskqueue_fast_run,
		NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
}
SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_fast, NULL);