/* subr_taskqueue.c, revision 208624 */
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_taskqueue.c 208624 2010-05-28 18:15:34Z zml $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	const char		*tq_name;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_tasks_running;
	int			tq_task_waiters;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	queue->tq_name = name;
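	/*
	 * The name is referenced, not copied, so the caller's string must
	 * remain valid for the lifetime of the queue.
	 */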
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return 0;
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return 0;
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}

void
taskqueue_run(struct taskqueue *queue)
{
	struct task *task;
	int owned, pending;

	owned = mtx_owned(&queue->tq_mutex);
	if (!owned)
		TQ_LOCK(queue);
	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		queue->tq_tasks_running++;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		queue->tq_tasks_running--;
		if (queue->tq_task_waiters > 0)
			wakeup(task);
	}

	/*
	 * For compatibility, unlock on return if the queue was not locked
	 * on entry, although this opens a race window.
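	 * Once the mutex is dropped, another thread may enqueue and run
	 * further tasks before the caller regains control, so an unlocked
	 * caller cannot assume the queue is still empty on return.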
	 */
	if (!owned)
		TQ_UNLOCK(queue);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || queue->tq_tasks_running > 0) {
			queue->tq_task_waiters++;
			msleep_spin(task, &queue->tq_mutex, "-", 0);
			queue->tq_task_waiters--;
		}
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || queue->tq_tasks_running > 0) {
			queue->tq_task_waiters++;
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
			queue->tq_task_waiters--;
		}
		mtx_unlock(&queue->tq_mutex);
	}
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check whether the TQ_FLAGS_ACTIVE flag was cleared in
		 * the meantime, which would mean we missed a wakeup.
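		 * Sleeping after a missed wakeup could leave this thread
		 * blocked in TQ_SLEEP() forever while taskqueue_terminate()
		 * waits for tq_tcount to drop, so re-test the flag before
		 * sleeping.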
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		 INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		 NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}
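
/*
 * Usage sketch (illustrative only, not part of this revision): how a
 * typical consumer drives this API with a private threaded queue, using
 * the same taskqueue_thread_enqueue pattern as TASKQUEUE_DEFINE_THREAD
 * above.  The example_* names are hypothetical and error handling is
 * elided, so the block is compiled out.
 */
#if 0
struct example_softc {
	struct taskqueue	*sc_tq;
	struct task		sc_task;
};

static void
example_task_fn(void *context, int pending)
{
	struct example_softc *sc = context;

	/* "pending" counts enqueues coalesced since the last run. */
	/* ... perform the deferred work for sc here ... */
	(void)sc;
	(void)pending;
}

static int
example_attach(struct example_softc *sc)
{
	/* Priority 0 task; higher-priority tasks are placed ahead in the queue. */
	TASK_INIT(&sc->sc_task, 0, example_task_fn, sc);

	/*
	 * Pass &sc->sc_tq as the enqueue context so that
	 * taskqueue_thread_enqueue() can wake the queue's own thread.
	 */
	sc->sc_tq = taskqueue_create("example_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	return (taskqueue_start_threads(&sc->sc_tq, 1, PWAIT,
	    "example taskq"));
}

static void
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/* Cheap from interrupt context: queues the task and wakes a thread. */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
}

static void
example_detach(struct example_softc *sc)
{
	/* Wait out any pending or running instance, then tear down. */
	taskqueue_drain(sc->sc_tq, &sc->sc_task);
	taskqueue_free(sc->sc_tq);
}
#endif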