subr_taskqueue.c revision 211928
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 211928 2010-08-28 08:38:03Z pjd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct task		*tq_running;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static void	taskqueue_run(struct taskqueue *, struct task **);

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
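/*
 * Illustrative sketch (hypothetical consumer code, not part of the
 * original file): the spin variants above exist so that "fast"
 * taskqueues (created with MTX_SPIN; see taskqueue_create_fast() and
 * the system-wide taskqueue_fast near the bottom of this file) can be
 * driven from contexts that must not sleep, such as an interrupt
 * filter.
 */
static struct task example_filter_task;	/* hypothetical, TASK_INIT'd elsewhere */

static int
example_intr_filter(void *arg)
{
	/* Sleeping is forbidden here; a spin-locked queue is still safe. */
	taskqueue_enqueue_fast(taskqueue_fast, &example_filter_task);
	return (FILTER_HANDLED);
}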
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		    prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
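/*
 * Illustrative sketch (hypothetical consumer code, not part of the
 * original file): how enqueueing behaves.  A task that is already
 * pending is not queued twice; its ta_pending count is bumped and the
 * handler later receives the total in its "pending" argument.  Tasks
 * with a higher ta_priority are inserted nearer the head and run first.
 */
static void
example_handler(void *context, int pending)
{
	/* "pending" is how many enqueues were coalesced into this call. */
}

static void
example_enqueue(struct taskqueue *tq, void *softc)
{
	static struct task task_lo, task_hi;

	TASK_INIT(&task_lo, 0, example_handler, softc);
	TASK_INIT(&task_hi, 10, example_handler, softc);	/* runs first */
	taskqueue_enqueue(tq, &task_hi);
	taskqueue_enqueue(tq, &task_lo);
	taskqueue_enqueue(tq, &task_lo);	/* coalesced: pending == 2 */
}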
static void
taskqueue_run(struct taskqueue *queue, struct task **tpp)
{
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		task->ta_running = tpp;
		*tpp = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		*tpp = NULL;
		wakeup(task);
	}
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 ||
	    (task->ta_running != NULL && task == *task->ta_running)) {
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	}
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	TQ_LOCK(taskqueue_swi);
	taskqueue_run(taskqueue_swi, &taskqueue_swi->tq_running);
	TQ_UNLOCK(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	TQ_LOCK(taskqueue_swi_giant);
	taskqueue_run(taskqueue_swi_giant, &taskqueue_swi_giant->tq_running);
	TQ_UNLOCK(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
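/*
 * Illustrative sketch (hypothetical consumer code, not part of the
 * original file): the usual way to get a private threaded taskqueue.
 * taskqueue_thread_enqueue() takes a struct taskqueue ** as its
 * context, which is why the address of the queue pointer is passed to
 * both taskqueue_create() and taskqueue_start_threads().
 */
static struct taskqueue *example_tq;	/* hypothetical */

static void
example_setup(void)
{
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PWAIT, "example taskq");
}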
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;
	struct task *running;

	/*
	 * The kernel stack space is globally addressable, and it would
	 * be an error to ask whether a task is running after the
	 * taskqueue has been released.  So it is safe to have the
	 * task point back to an address in the taskqueue's stack to
	 * determine if the task is running.
	 */
	running = NULL;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq, &running);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run(tq, &running);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	TQ_LOCK(taskqueue_fast);
	taskqueue_run(taskqueue_fast, &taskqueue_fast->tq_running);
	TQ_UNLOCK(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
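/*
 * Illustrative sketch (hypothetical consumer code, not part of the
 * original file): orderly teardown.  taskqueue_drain() returns only
 * once the task is neither pending nor the currently running task of
 * any queue thread, so the task's context may be freed afterwards;
 * taskqueue_free() then stops the threads and releases the queue.
 */
static void
example_teardown(struct taskqueue *tq, struct task *ta)
{
	taskqueue_drain(tq, ta);	/* waits for pending and running */
	taskqueue_free(tq);
}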