head/sys/kern/subr_taskqueue.c: r221059 vs. r225570 (lines marked "-" were removed in r225570, lines marked "+" were added)
1/*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 221059 2011-04-26 11:39:56Z kib $");
28__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 225570 2011-09-15 08:42:06Z adrian $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/interrupt.h>
34#include <sys/kernel.h>
35#include <sys/kthread.h>
+ 36#include <sys/limits.h>
36#include <sys/lock.h>
37#include <sys/malloc.h>
38#include <sys/mutex.h>
39#include <sys/proc.h>
40#include <sys/sched.h>
41#include <sys/taskqueue.h>
42#include <sys/unistd.h>
43#include <machine/stdarg.h>
44
45static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
46static void *taskqueue_giant_ih;
47static void *taskqueue_ih;
48
49struct taskqueue_busy {
50 struct task *tb_running;
51 TAILQ_ENTRY(taskqueue_busy) tb_link;
52};
53
54struct taskqueue {
55 STAILQ_HEAD(, task) tq_queue;
56 taskqueue_enqueue_fn tq_enqueue;
57 void *tq_context;
58 TAILQ_HEAD(, taskqueue_busy) tq_active;
59 struct mtx tq_mutex;
60 struct thread **tq_threads;
61 int tq_tcount;
62 int tq_spin;
63 int tq_flags;
64 int tq_callouts;
65};
66
67#define TQ_FLAGS_ACTIVE (1 << 0)
68#define TQ_FLAGS_BLOCKED (1 << 1)
69#define TQ_FLAGS_PENDING (1 << 2)
70
71#define DT_CALLOUT_ARMED (1 << 0)
72
73#define TQ_LOCK(tq) \
74 do { \
75 if ((tq)->tq_spin) \
76 mtx_lock_spin(&(tq)->tq_mutex); \
77 else \
78 mtx_lock(&(tq)->tq_mutex); \
79 } while (0)
80
81#define TQ_UNLOCK(tq) \
82 do { \
83 if ((tq)->tq_spin) \
84 mtx_unlock_spin(&(tq)->tq_mutex); \
85 else \
86 mtx_unlock(&(tq)->tq_mutex); \
87 } while (0)
88
89void
90_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
91 int priority, task_fn_t func, void *context)
92{
93
94 TASK_INIT(&timeout_task->t, priority, func, context);
95 callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
96 timeout_task->q = queue;
97 timeout_task->f = 0;
98}
99
100static __inline int
101TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
102 int t)
103{
104 if (tq->tq_spin)
105 return (msleep_spin(p, m, wm, t));
106 return (msleep(p, m, pri, wm, t));
107}
108
109static struct taskqueue *
110_taskqueue_create(const char *name __unused, int mflags,
111 taskqueue_enqueue_fn enqueue, void *context,
112 int mtxflags, const char *mtxname)
113{
114 struct taskqueue *queue;
115
116 queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
117 if (!queue)
118 return NULL;
119
120 STAILQ_INIT(&queue->tq_queue);
121 TAILQ_INIT(&queue->tq_active);
122 queue->tq_enqueue = enqueue;
123 queue->tq_context = context;
124 queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
125 queue->tq_flags |= TQ_FLAGS_ACTIVE;
126 mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);
127
128 return queue;
129}
130
131struct taskqueue *
132taskqueue_create(const char *name, int mflags,
133 taskqueue_enqueue_fn enqueue, void *context)
134{
135 return _taskqueue_create(name, mflags, enqueue, context,
136 MTX_DEF, "taskqueue");
137}
138
139/*
140 * Signal a taskqueue thread to terminate.
141 */
142static void
143taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
144{
145
146 while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
147 wakeup(tq);
148 TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
149 }
150}
151
152void
153taskqueue_free(struct taskqueue *queue)
154{
155
156 TQ_LOCK(queue);
157 queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
158 taskqueue_terminate(queue->tq_threads, queue);
159 KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
160 KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
161 mtx_destroy(&queue->tq_mutex);
162 free(queue->tq_threads, M_TASKQUEUE);
163 free(queue, M_TASKQUEUE);
164}
165
166static int
167taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
168{
169 struct task *ins;
170 struct task *prev;
171
172 /*
173 * Count multiple enqueues.
174 */
175 if (task->ta_pending) {
- 176		task->ta_pending++;
+ 177		if (task->ta_pending < USHRT_MAX)
+ 178			task->ta_pending++;
177 return (0);
178 }
179
180 /*
181 * Optimise the case when all tasks have the same priority.
182 */
183 prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
184 if (!prev || prev->ta_priority >= task->ta_priority) {
185 STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
186 } else {
187 prev = NULL;
188 for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
189 prev = ins, ins = STAILQ_NEXT(ins, ta_link))
190 if (ins->ta_priority < task->ta_priority)
191 break;
192
193 if (prev)
194 STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
195 else
196 STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
197 }
198
199 task->ta_pending = 1;
200 if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
201 queue->tq_enqueue(queue->tq_context);
202 else
203 queue->tq_flags |= TQ_FLAGS_PENDING;
204
205 return (0);
206}
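
The hunk above is the functional change in this revision: taskqueue_enqueue_locked() now saturates the pending counter at USHRT_MAX instead of incrementing it unconditionally, which is also why <sys/limits.h> is newly included. ta_pending is a u_short in struct task, so a task that keeps getting enqueued before it gets a chance to run could previously wrap its counter back to zero and lose the coalesced count. A minimal sketch of what that counter means to a consumer, with a hypothetical my_task_fn handler:

	/*
	 * "pending" is the number of taskqueue_enqueue() calls that were
	 * coalesced into this single run of the task; after this change
	 * it saturates at USHRT_MAX instead of wrapping.
	 */
	static void
	my_task_fn(void *context, int pending)
	{
		printf("my_task_fn: servicing %d coalesced requests\n", pending);
	}
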
207int
208taskqueue_enqueue(struct taskqueue *queue, struct task *task)
209{
210 int res;
211
212 TQ_LOCK(queue);
213 res = taskqueue_enqueue_locked(queue, task);
214 TQ_UNLOCK(queue);
215
216 return (res);
217}
218
219static void
220taskqueue_timeout_func(void *arg)
221{
222 struct taskqueue *queue;
223 struct timeout_task *timeout_task;
224
225 timeout_task = arg;
226 queue = timeout_task->q;
227 KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
228 timeout_task->f &= ~DT_CALLOUT_ARMED;
229 queue->tq_callouts--;
230 taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
231}
232
233int
234taskqueue_enqueue_timeout(struct taskqueue *queue,
235 struct timeout_task *timeout_task, int ticks)
236{
237 int res;
238
239 TQ_LOCK(queue);
240 KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
241 ("Migrated queue"));
242 KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
243 timeout_task->q = queue;
244 res = timeout_task->t.ta_pending;
245 if (ticks == 0) {
246 taskqueue_enqueue_locked(queue, &timeout_task->t);
247 } else {
248 if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
249 res++;
250 } else {
251 queue->tq_callouts++;
252 timeout_task->f |= DT_CALLOUT_ARMED;
253 }
254 callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
255 timeout_task);
256 }
257 TQ_UNLOCK(queue);
258 return (res);
259}
260
261void
262taskqueue_block(struct taskqueue *queue)
263{
264
265 TQ_LOCK(queue);
266 queue->tq_flags |= TQ_FLAGS_BLOCKED;
267 TQ_UNLOCK(queue);
268}
269
270void
271taskqueue_unblock(struct taskqueue *queue)
272{
273
274 TQ_LOCK(queue);
275 queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
276 if (queue->tq_flags & TQ_FLAGS_PENDING) {
277 queue->tq_flags &= ~TQ_FLAGS_PENDING;
278 queue->tq_enqueue(queue->tq_context);
279 }
280 TQ_UNLOCK(queue);
281}
282
283static void
284taskqueue_run_locked(struct taskqueue *queue)
285{
286 struct taskqueue_busy tb;
287 struct task *task;
288 int pending;
289
290 mtx_assert(&queue->tq_mutex, MA_OWNED);
291 tb.tb_running = NULL;
292 TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);
293
294 while (STAILQ_FIRST(&queue->tq_queue)) {
295 /*
296 * Carefully remove the first task from the queue and
297 * zero its pending count.
298 */
299 task = STAILQ_FIRST(&queue->tq_queue);
300 STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
301 pending = task->ta_pending;
302 task->ta_pending = 0;
303 tb.tb_running = task;
304 TQ_UNLOCK(queue);
305
306 task->ta_func(task->ta_context, pending);
307
308 TQ_LOCK(queue);
309 tb.tb_running = NULL;
310 wakeup(task);
311 }
312 TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
313}
314
315void
316taskqueue_run(struct taskqueue *queue)
317{
318
319 TQ_LOCK(queue);
320 taskqueue_run_locked(queue);
321 TQ_UNLOCK(queue);
322}
323
324static int
325task_is_running(struct taskqueue *queue, struct task *task)
326{
327 struct taskqueue_busy *tb;
328
329 mtx_assert(&queue->tq_mutex, MA_OWNED);
330 TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
331 if (tb->tb_running == task)
332 return (1);
333 }
334 return (0);
335}
336
337static int
338taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
339 u_int *pendp)
340{
341
342 if (task->ta_pending > 0)
343 STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
344 if (pendp != NULL)
345 *pendp = task->ta_pending;
346 task->ta_pending = 0;
347 return (task_is_running(queue, task) ? EBUSY : 0);
348}
349
350int
351taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
352{
353 u_int pending;
354 int error;
355
356 TQ_LOCK(queue);
357 pending = task->ta_pending;
358 error = taskqueue_cancel_locked(queue, task, pendp);
359 TQ_UNLOCK(queue);
360
361 return (error);
362}
363
364int
365taskqueue_cancel_timeout(struct taskqueue *queue,
366 struct timeout_task *timeout_task, u_int *pendp)
367{
368 u_int pending, pending1;
369 int error;
370
371 TQ_LOCK(queue);
372 pending = !!callout_stop(&timeout_task->c);
373 error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
374 if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
375 timeout_task->f &= ~DT_CALLOUT_ARMED;
376 queue->tq_callouts--;
377 }
378 TQ_UNLOCK(queue);
379
380 if (pendp != NULL)
381 *pendp = pending + pending1;
382 return (error);
383}
384
385void
386taskqueue_drain(struct taskqueue *queue, struct task *task)
387{
388
389 if (!queue->tq_spin)
390 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
391
392 TQ_LOCK(queue);
393 while (task->ta_pending != 0 || task_is_running(queue, task))
394 TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
395 TQ_UNLOCK(queue);
396}
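
taskqueue_cancel() above can only remove an instance that is still queued; if the task is mid-run it returns EBUSY and leaves it alone, whereas taskqueue_drain() sleeps until the task is neither pending nor running (note the WITNESS_WARN: draining a non-spin queue requires a sleepable context). A common teardown idiom combining the two, sketched here with a hypothetical my_stop() helper:

	/*
	 * Hypothetical helper: drop the task if it has not started yet,
	 * and wait it out if an instance is already executing.
	 */
	static void
	my_stop(struct taskqueue *tq, struct task *task)
	{
		u_int pend;

		if (taskqueue_cancel(tq, task, &pend) == EBUSY)
			taskqueue_drain(tq, task);
	}
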
397
398void
399taskqueue_drain_timeout(struct taskqueue *queue,
400 struct timeout_task *timeout_task)
401{
402
403 callout_drain(&timeout_task->c);
404 taskqueue_drain(queue, &timeout_task->t);
405}
406
407static void
408taskqueue_swi_enqueue(void *context)
409{
410 swi_sched(taskqueue_ih, 0);
411}
412
413static void
414taskqueue_swi_run(void *dummy)
415{
416 taskqueue_run(taskqueue_swi);
417}
418
419static void
420taskqueue_swi_giant_enqueue(void *context)
421{
422 swi_sched(taskqueue_giant_ih, 0);
423}
424
425static void
426taskqueue_swi_giant_run(void *dummy)
427{
428 taskqueue_run(taskqueue_swi_giant);
429}
430
431int
432taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
433 const char *name, ...)
434{
435 va_list ap;
436 struct thread *td;
437 struct taskqueue *tq;
438 int i, error;
439 char ktname[MAXCOMLEN + 1];
440
441 if (count <= 0)
442 return (EINVAL);
443
444 tq = *tqp;
445
446 va_start(ap, name);
447 vsnprintf(ktname, sizeof(ktname), name, ap);
448 va_end(ap);
449
450 tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
451 M_NOWAIT | M_ZERO);
452 if (tq->tq_threads == NULL) {
453 printf("%s: no memory for %s threads\n", __func__, ktname);
454 return (ENOMEM);
455 }
456
457 for (i = 0; i < count; i++) {
458 if (count == 1)
459 error = kthread_add(taskqueue_thread_loop, tqp, NULL,
460 &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
461 else
462 error = kthread_add(taskqueue_thread_loop, tqp, NULL,
463 &tq->tq_threads[i], RFSTOPPED, 0,
464 "%s_%d", ktname, i);
465 if (error) {
466 /* should be ok to continue, taskqueue_free will dtrt */
467 printf("%s: kthread_add(%s): error %d", __func__,
468 ktname, error);
469 tq->tq_threads[i] = NULL; /* paranoid */
470 } else
471 tq->tq_tcount++;
472 }
473 for (i = 0; i < count; i++) {
474 if (tq->tq_threads[i] == NULL)
475 continue;
476 td = tq->tq_threads[i];
477 thread_lock(td);
478 sched_prio(td, pri);
479 sched_add(td, SRQ_BORING);
480 thread_unlock(td);
481 }
482
483 return (0);
484}
485
486void
487taskqueue_thread_loop(void *arg)
488{
489 struct taskqueue **tqp, *tq;
490
491 tqp = arg;
492 tq = *tqp;
493 TQ_LOCK(tq);
494 while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
495 taskqueue_run_locked(tq);
496 /*
497 * Because taskqueue_run() can drop tq_mutex, we need to
498 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
499 * meantime, which means we missed a wakeup.
500 */
501 if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
502 break;
503 TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
504 }
505 taskqueue_run_locked(tq);
506
507 /* rendezvous with thread that asked us to terminate */
508 tq->tq_tcount--;
509 wakeup_one(tq->tq_threads);
510 TQ_UNLOCK(tq);
511 kthread_exit();
512}
513
514void
515taskqueue_thread_enqueue(void *context)
516{
517 struct taskqueue **tqp, *tq;
518
519 tqp = context;
520 tq = *tqp;
521
522 mtx_assert(&tq->tq_mutex, MA_OWNED);
523 wakeup_one(tq);
524}
525
526TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
527 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
528 INTR_MPSAFE, &taskqueue_ih));
529
530TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
531 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
532 NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
533
534TASKQUEUE_DEFINE_THREAD(thread);
535
536struct taskqueue *
537taskqueue_create_fast(const char *name, int mflags,
538 taskqueue_enqueue_fn enqueue, void *context)
539{
540 return _taskqueue_create(name, mflags, enqueue, context,
541 MTX_SPIN, "fast_taskqueue");
542}
543
544/* NB: for backwards compatibility */
545int
546taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
547{
548 return taskqueue_enqueue(queue, task);
549}
550
551static void *taskqueue_fast_ih;
552
553static void
554taskqueue_fast_enqueue(void *context)
555{
556 swi_sched(taskqueue_fast_ih, 0);
557}
558
559static void
560taskqueue_fast_run(void *dummy)
561{
562 taskqueue_run(taskqueue_fast);
563}
564
565TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
566 swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
567 SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
568
569int
570taskqueue_member(struct taskqueue *queue, struct thread *td)
571{
572 int i, j, ret = 0;
573
574 TQ_LOCK(queue);
575 for (i = 0, j = 0; ; i++) {
576 if (queue->tq_threads[i] == NULL)
577 continue;
578 if (queue->tq_threads[i] == td) {
579 ret = 1;
580 break;
581 }
582 if (++j >= queue->tq_tcount)
583 break;
584 }
585 TQ_UNLOCK(queue);
586 return (ret);
587}
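
For reference, a minimal end-to-end sketch of the API this file implements, in the thread-backed style: taskqueue_thread_enqueue() expects a pointer to the queue variable as its context (see taskqueue_thread_loop() above), TASK_INIT() and TIMEOUT_TASK_INIT() come from <sys/taskqueue.h>, and all my_* names are hypothetical:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/kernel.h>
	#include <sys/malloc.h>
	#include <sys/proc.h>
	#include <sys/taskqueue.h>

	static struct taskqueue *my_tq;
	static struct task my_task;
	static struct timeout_task my_tt;

	static void
	my_task_fn(void *context, int pending)
	{
		/* Runs in my_tq's thread; with one thread, runs are serialized. */
	}

	static void
	my_setup(void)
	{
		/* Pass &my_tq, not my_tq: the enqueue callback dereferences it. */
		my_tq = taskqueue_create("my_tq", M_WAITOK,
		    taskqueue_thread_enqueue, &my_tq);
		taskqueue_start_threads(&my_tq, 1, PWAIT, "my taskq");

		TASK_INIT(&my_task, 0, my_task_fn, NULL);
		TIMEOUT_TASK_INIT(my_tq, &my_tt, 0, my_task_fn, NULL);

		taskqueue_enqueue(my_tq, &my_task);		/* run as soon as possible */
		taskqueue_enqueue_timeout(my_tq, &my_tt, hz);	/* run after ~1 second */
	}

	static void
	my_teardown(void)
	{
		taskqueue_drain_timeout(my_tq, &my_tt);
		taskqueue_drain(my_tq, &my_task);
		taskqueue_free(my_tq);	/* waits for the queue thread to exit */
	}
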