/* subr_taskqueue.c revision 131246 */
1/*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 131246 2004-06-28 16:28:23Z jhb $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/interrupt.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
36#include <sys/lock.h>
37#include <sys/malloc.h>
38#include <sys/mutex.h>
39#include <sys/taskqueue.h>
40#include <sys/unistd.h>
41
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;	/* swi cookie for the Giant-locked queue */
static void	*taskqueue_ih;		/* swi cookie for the MP-safe queue */
static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;	/* all registered queues */
static struct mtx taskqueue_queues_mutex;	/* protects taskqueue_queues */
static struct proc *taskqueue_thread_proc;	/* kthread servicing the 'thread' queue */
48
/*
 * A task queue: a priority-ordered list of pending tasks plus the hook
 * used to kick whatever context (swi, kthread, ...) services them.
 */
struct taskqueue {
	STAILQ_ENTRY(taskqueue)	tq_link;	/* entry on taskqueue_queues */
	STAILQ_HEAD(, task)	tq_queue;	/* pending tasks, priority order */
	const char		*tq_name;	/* name, matched by taskqueue_find() */
	taskqueue_enqueue_fn	tq_enqueue;	/* called when a task becomes pending */
	void			*tq_context;	/* opaque argument to tq_enqueue */
	struct mtx		tq_mutex;	/* protects tq_queue and task state */
};
57
58static void	init_taskqueue_list(void *data);
59
60static void
61init_taskqueue_list(void *data __unused)
62{
63
64	mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
65	STAILQ_INIT(&taskqueue_queues);
66}
67SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
68    NULL);
69
70struct taskqueue *
71taskqueue_create(const char *name, int mflags,
72		 taskqueue_enqueue_fn enqueue, void *context)
73{
74	struct taskqueue *queue;
75
76	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
77	if (!queue)
78		return 0;
79
80	STAILQ_INIT(&queue->tq_queue);
81	queue->tq_name = name;
82	queue->tq_enqueue = enqueue;
83	queue->tq_context = context;
84	mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);
85
86	mtx_lock(&taskqueue_queues_mutex);
87	STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
88	mtx_unlock(&taskqueue_queues_mutex);
89
90	return queue;
91}
92
/*
 * Unregister and destroy a task queue.  Tasks still pending are run to
 * completion before the queue memory is released.
 */
void
taskqueue_free(struct taskqueue *queue)
{

	/* Unpublish first so taskqueue_find() can no longer return it. */
	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	/*
	 * Drain with the mutex held; taskqueue_run() preserves the lock
	 * state of its caller, so it returns with tq_mutex still owned.
	 * NOTE(review): nothing here stops a racing taskqueue_enqueue()
	 * between the drain and the destroy — callers presumably must
	 * quiesce the queue first; verify at call sites.
	 */
	mtx_lock(&queue->tq_mutex);
	taskqueue_run(queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue, M_TASKQUEUE);
}
106
107/*
108 * Returns with the taskqueue locked.
109 */
110struct taskqueue *
111taskqueue_find(const char *name)
112{
113	struct taskqueue *queue;
114
115	mtx_lock(&taskqueue_queues_mutex);
116	STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
117		if (strcmp(queue->tq_name, name) == 0) {
118			mtx_lock(&queue->tq_mutex);
119			mtx_unlock(&taskqueue_queues_mutex);
120			return queue;
121		}
122	}
123	mtx_unlock(&taskqueue_queues_mutex);
124	return NULL;
125}
126
127int
128taskqueue_enqueue(struct taskqueue *queue, struct task *task)
129{
130	struct task *ins;
131	struct task *prev;
132
133	mtx_lock(&queue->tq_mutex);
134
135	/*
136	 * Count multiple enqueues.
137	 */
138	if (task->ta_pending) {
139		task->ta_pending++;
140		mtx_unlock(&queue->tq_mutex);
141		return 0;
142	}
143
144	/*
145	 * Optimise the case when all tasks have the same priority.
146	 */
147	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
148	if (!prev || prev->ta_priority >= task->ta_priority) {
149		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
150	} else {
151		prev = 0;
152		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
153		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
154			if (ins->ta_priority < task->ta_priority)
155				break;
156
157		if (prev)
158			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
159		else
160			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
161	}
162
163	task->ta_pending = 1;
164	if (queue->tq_enqueue)
165		queue->tq_enqueue(queue->tq_context);
166
167	mtx_unlock(&queue->tq_mutex);
168
169	return 0;
170}
171
/*
 * Run every task currently queued on 'queue'.  May be called with the
 * queue mutex held or not held; the entry lock state is restored on
 * return.  The mutex is dropped around each task callback, so callbacks
 * may sleep and may re-enqueue tasks (which this loop will then pick up).
 */
void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	/* Remember the caller's lock state so we can restore it. */
	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		mtx_lock(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		/* Drop the lock: the handler may sleep or re-enqueue. */
		mtx_unlock(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock(&queue->tq_mutex);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		mtx_unlock(&queue->tq_mutex);
}
204
/* Enqueue hook for the 'swi' queue: schedule its software interrupt. */
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}
210
/* Swi handler: drain the MP-safe 'swi' task queue. */
static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}
216
/* Enqueue hook for the 'swi_giant' queue: schedule its software interrupt. */
static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}
222
/* Swi handler: drain the Giant-locked 'swi_giant' task queue. */
static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
228
/*
 * Service loop for the 'thread' task queue.  Holds the queue mutex
 * across the loop; msleep() atomically drops it while sleeping, and
 * taskqueue_run() sees it owned on entry so the lock state is
 * preserved.  Woken by taskqueue_thread_enqueue().  Never returns.
 */
static void
taskqueue_thread_loop(void *dummy)
{

	mtx_lock(&taskqueue_thread->tq_mutex);
	for (;;) {
		taskqueue_run(taskqueue_thread);
		msleep(taskqueue_thread, &taskqueue_thread->tq_mutex, PWAIT,
		    "-", 0);
	}
}
240
/*
 * Enqueue hook for the 'thread' queue: wake the service thread.
 * Called from taskqueue_enqueue() with the queue mutex held, which the
 * assertion documents — this guarantees the wakeup cannot race with
 * the msleep() in taskqueue_thread_loop().
 */
static void
taskqueue_thread_enqueue(void *context)
{

	mtx_assert(&taskqueue_thread->tq_mutex, MA_OWNED);
	wakeup(taskqueue_thread);
}
248
/* MP-safe software-interrupt task queue. */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

/* Software-interrupt task queue whose handlers run under Giant. */
TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

/* Task queue serviced by a dedicated kernel thread; handlers may sleep. */
TASKQUEUE_DEFINE(thread, taskqueue_thread_enqueue, 0,
		 kthread_create(taskqueue_thread_loop, NULL,
		 &taskqueue_thread_proc, 0, 0, "taskqueue"));
260
261int
262taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
263{
264	struct task *ins;
265	struct task *prev;
266
267	mtx_lock_spin(&queue->tq_mutex);
268
269	/*
270	 * Count multiple enqueues.
271	 */
272	if (task->ta_pending) {
273		task->ta_pending++;
274		mtx_unlock_spin(&queue->tq_mutex);
275		return 0;
276	}
277
278	/*
279	 * Optimise the case when all tasks have the same priority.
280	 */
281	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
282	if (!prev || prev->ta_priority >= task->ta_priority) {
283		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
284	} else {
285		prev = 0;
286		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
287		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
288			if (ins->ta_priority < task->ta_priority)
289				break;
290
291		if (prev)
292			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
293		else
294			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
295	}
296
297	task->ta_pending = 1;
298	if (queue->tq_enqueue)
299		queue->tq_enqueue(queue->tq_context);
300
301	mtx_unlock_spin(&queue->tq_mutex);
302
303	return 0;
304}
305
/*
 * Drain a "fast" task queue.  Unlike taskqueue_run(), the spin mutex
 * must NOT be held on entry.  The lock is dropped around each handler,
 * so handlers run unlocked, but they execute from swi context and must
 * not sleep.
 */
static void
taskqueue_run_fast(struct taskqueue *queue)
{
	struct task *task;
	int pending;

	mtx_lock_spin(&queue->tq_mutex);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		/* Drop the spin lock while the handler runs. */
		mtx_unlock_spin(&queue->tq_mutex);

		task->ta_func(task->ta_context, pending);

		mtx_lock_spin(&queue->tq_mutex);
	}
	mtx_unlock_spin(&queue->tq_mutex);
}
330
/* The global "fast" task queue and the swi cookie used to schedule it. */
struct taskqueue *taskqueue_fast;
static void	*taskqueue_fast_ih;
333
/* Enqueue hook for the fast queue: schedule its software interrupt. */
static void
taskqueue_fast_schedule(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}
339
/* Swi handler: drain the fast task queue. */
static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run_fast(taskqueue_fast);
}
345
/*
 * Hand-rolled equivalent of TASKQUEUE_DEFINE for the "fast" queue.  It
 * cannot use taskqueue_create() because its mutex must be a spin mutex
 * (MTX_SPIN) so that tasks can be enqueued from fast interrupt context.
 */
static void
taskqueue_define_fast(void *arg)
{

	taskqueue_fast = malloc(sizeof(struct taskqueue), M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (!taskqueue_fast) {
		printf("%s: Unable to allocate fast task queue!\n", __func__);
		return;
	}

	STAILQ_INIT(&taskqueue_fast->tq_queue);
	taskqueue_fast->tq_name = "fast";
	taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
	/* Spin mutex: usable from interrupt context; holders may not sleep. */
	mtx_init(&taskqueue_fast->tq_mutex, "taskqueue_fast", NULL, MTX_SPIN);

	/* Register on the global list, as taskqueue_create() would. */
	mtx_lock(&taskqueue_queues_mutex);
	STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
	mtx_unlock(&taskqueue_queues_mutex);

	swi_add(NULL, "Fast task queue", taskqueue_fast_run,
		NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
}
SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_fast, NULL);
371