subr_taskqueue.c revision 207439
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 207439 2010-04-30 16:29:05Z zml $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}
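
/*
 * Illustrative sketch, not compiled into this file: how a consumer
 * declares and initializes a task for this facility.  TASK_INIT() and
 * struct task come from <sys/taskqueue.h>; the foo_* names are
 * hypothetical.  A higher ta_priority makes taskqueue_enqueue() (below)
 * place the task closer to the head of the queue.
 */
#if 0
static struct task foo_task;
static void foo_task_fn(void *context, int pending);

static void
foo_init_example(void *softc)
{
	/* priority 0, handler foo_task_fn, handler argument softc */
	TASK_INIT(&foo_task, 0, foo_task_fn, softc);
}
#endif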

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		task->ta_flags |= TA_FLAGS_RUNNING;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		task->ta_flags &= ~TA_FLAGS_RUNNING;
		wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}
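
/*
 * Illustrative sketch, not compiled into this file: a task handler.
 * The "pending" argument delivered by taskqueue_run() above is the
 * number of taskqueue_enqueue() calls that were coalesced since the
 * handler last ran, so a handler can batch its work accordingly.  The
 * foo_* names are hypothetical.
 */
#if 0
static void
foo_task_fn(void *context, int pending)
{
	struct foo_softc *sc = context;

	/* pending > 1 means several enqueues were folded into one run */
	while (pending-- > 0)
		foo_process_one(sc);
}
#endif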

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 ||
		    (task->ta_flags & TA_FLAGS_RUNNING) != 0)
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 ||
		    (task->ta_flags & TA_FLAGS_RUNNING) != 0)
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
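
/*
 * Illustrative sketch, not compiled into this file: creating a private
 * queue serviced by one dedicated kernel thread.  The enqueue context
 * handed to taskqueue_create() must be the address of the queue pointer
 * itself, because taskqueue_thread_loop() above and
 * taskqueue_thread_enqueue() below both dereference it.  The sc_* names
 * are hypothetical.
 */
#if 0
static void
foo_start_example(struct foo_softc *sc)
{
	sc->sc_tq = taskqueue_create("foo_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "foo taskq");
}
#endif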

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
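
/*
 * Illustrative sketch, not compiled into this file: tearing a private
 * queue down again.  taskqueue_drain() returns once the task is neither
 * pending nor running; taskqueue_free() runs any remaining tasks, waits
 * for the queue's threads to exit and releases the memory.  The sc_*
 * names are hypothetical.
 */
#if 0
static void
foo_stop_example(struct foo_softc *sc)
{
	taskqueue_drain(sc->sc_tq, &sc->sc_task);
	taskqueue_free(sc->sc_tq);
	sc->sc_tq = NULL;
}
#endif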