/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/subr_taskqueue.c 254787 2013-08-24 14:41:49Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

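/*
 * A taskqueue_busy record sits on the queue's tq_active list for the
 * duration of each task's execution, so that taskqueue_drain() and
 * taskqueue_cancel() can detect tasks that are currently running.
 */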
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

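/*
 * The queue mutex is either a sleep mutex or, for "fast" taskqueues
 * created with MTX_SPIN, a spin mutex; these wrappers select the
 * matching lock, unlock and sleep primitives based on tq_spin.
 */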
#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

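/*
 * Initialize a timeout task: the embedded task plus a callout that
 * shares the queue's mutex, so the callout handler runs with the
 * queue already locked.
 */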
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

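/*
 * Common constructor behind the taskqueue_create() and
 * taskqueue_create_fast() front ends below; mtxflags selects between a
 * default sleep mutex and a spin mutex.
 */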
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

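/*
 * Typical usage, following the pattern documented in taskqueue(9)
 * (the "foo" names here are placeholders):
 *
 *	static struct taskqueue *foo_tq;
 *
 *	foo_tq = taskqueue_create("foo_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &foo_tq);
 *	taskqueue_start_threads(&foo_tq, 1, PWAIT, "foo taskq");
 *
 *	TASK_INIT(&foo_task, 0, foo_task_fn, foo_softc);
 *	taskqueue_enqueue(foo_tq, &foo_task);
 *
 * The context passed to taskqueue_thread_enqueue() must be a pointer
 * to the taskqueue pointer, since the queue does not yet exist when
 * taskqueue_create() is called.
 */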
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

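/*
 * Tear down a queue: mark it inactive, wake and wait for any service
 * threads to exit, then release its resources.  This waits for the
 * threads to terminate, so it must not be called from a task running
 * on the queue itself.
 */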
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

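/*
 * Enqueue a task with the queue lock held.  Tasks are kept sorted by
 * descending priority; a task that is already pending just has its
 * pending count bumped.
 */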
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

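/*
 * Schedule a timeout task to be enqueued after approximately 'ticks'
 * clock ticks (immediately if ticks is zero); a negative value is
 * treated as its absolute value.  Returns the number of enqueues
 * already pending, counting an armed callout as pending.
 */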
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
	}
	TQ_UNLOCK(queue);
	return (res);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

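/*
 * Run all pending tasks, dropping the queue lock around each task
 * function invocation.  A taskqueue_busy record on tq_active marks the
 * task currently running; waiters in taskqueue_drain() are woken after
 * each task completes.
 */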
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

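/*
 * Returns non-zero if the task is currently being executed by one of
 * the threads servicing this queue.  The queue lock must be held.
 */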
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	u_int pending;
	int error;

	TQ_LOCK(queue);
	pending = task->ta_pending;
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

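/*
 * Wait until the task is neither pending nor running.  This may sleep,
 * and it would deadlock if called from the task itself, so it must be
 * called from a different context with the queue unlocked.
 */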
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

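/*
 * Create 'count' kernel threads at priority 'pri' to service the
 * queue.  The printf-style name is used as the thread name, with a
 * "_%d" suffix appended when more than one thread is created.
 */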
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

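/*
 * Main loop for threads created by taskqueue_start_threads().  Each
 * thread runs tasks until the queue is freed, then performs the
 * shutdown callback and rendezvous with taskqueue_terminate().
 */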
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * must recheck whether TQ_FLAGS_ACTIVE was cleared while
		 * the lock was dropped; if so, we missed the wakeup and
		 * must not go back to sleep.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

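/*
 * Standard enqueue callback for thread-backed queues: wake one of the
 * service threads sleeping in taskqueue_thread_loop().
 */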
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	TQ_ASSERT_LOCKED(tq);
	wakeup_one(tq);
}

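/*
 * The system-wide software-interrupt taskqueues: taskqueue_swi runs
 * its handlers MPSAFE, while taskqueue_swi_giant runs its handlers
 * with Giant held.
 */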
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

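/*
 * "Fast" taskqueues use a spin mutex for tq_mutex, so they can be
 * driven from contexts that cannot use a sleep mutex.
 */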
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

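/*
 * Returns non-zero if 'td' is one of the threads servicing 'queue'.
 */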
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}